Added argument parser

Branch: master
Author: Avril (2 years ago)
parent e5c29eb368
commit dd5c20282b
Signed by: flanchan
GPG Key ID: 284488987C31F630

@@ -21,8 +21,9 @@ use futures::{
 #[derive(Debug, Default, Clone)]
 pub struct Args
 {
-	walker: walk::Config,
-	worker: work::Config,
+	pub reverse: bool,
+	pub walker: walk::Config,
+	pub worker: work::Config,
 
 	paths: Option<Vec<PathBuf>>,
 }
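
With `reverse`, `walker`, and `worker` now public, callers can populate the parse output directly. A minimal standalone sketch of that shape follows; `WalkConfig` and `WorkConfig` are simplified stand-ins for this crate's `walk::Config` and `work::Config`, and the `recursion_depth: Option<usize>` field type is assumed for illustration only.

use std::path::PathBuf;

// Simplified stand-ins for the crate's `walk::Config` / `work::Config`.
#[derive(Debug, Default, Clone)]
struct WalkConfig { recursion_depth: Option<usize> } // field type assumed for this sketch

#[derive(Debug, Default, Clone)]
struct WorkConfig;

#[derive(Debug, Default, Clone)]
struct Args {
    pub reverse: bool,
    pub walker: WalkConfig,
    pub worker: WorkConfig,
    paths: Option<Vec<PathBuf>>, // still private: only set through the parser
}

fn main() {
    // With the fields public, a caller can tweak them after `Default::default()`.
    let mut args = Args::default();
    args.reverse = true;
    args.walker.recursion_depth = Some(3);
    println!("{args:?}");
}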
@@ -258,6 +259,15 @@ where I: Iterator<Item = String>
 		};
 	}
+	// Modes //
+	// --help
+	if input == Arg::Long("help") {
+		return Ok(Some(Mode::Help));
+	}
+	// Normal //
 	// -r, --recursive <limit>
 	if input.is_any(args![b'r', "recursive"]) {
 		output.walker.recursion_depth = if input.is_long() {
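
The new `// Modes` block short-circuits argument parsing: as soon as `--help` is seen, a help mode is returned and no further options are interpreted. A self-contained sketch of that pattern, with simplified stand-ins for the crate's `Arg` and `Mode` types:

#[derive(Debug)]
enum Mode { Help, Normal(Vec<String>) }

fn parse<I: Iterator<Item = String>>(input: I) -> Mode {
    let mut rest = Vec::new();
    for arg in input {
        // `--help` wins over everything else and stops parsing immediately.
        if arg == "--help" {
            return Mode::Help;
        }
        rest.push(arg);
    }
    Mode::Normal(rest)
}

fn main() {
    let help = parse(["--help", "-r", "2"].map(String::from).into_iter());
    let normal = parse(["-r", "2"].map(String::from).into_iter());
    println!("{help:?} / {normal:?}");
}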
@@ -273,16 +283,18 @@ where I: Iterator<Item = String>
 		};
 	}
+	// -a, -m, -c, --{a,m,c}time
 	if input.is_any(args![b'a', "atime"]) {
 		output.worker.by = work::OrderBy::AccessTime;
-	}
-	if input.is_any(args![b'c', "ctime"]) {
+	} else if input.is_any(args![b'c', "ctime"]) {
 		output.worker.by = work::OrderBy::CreationTime;
-	}
-	if input.is_any(args![b'm', "mtime"]) {
+	} else if input.is_any(args![b'm', "mtime"]) {
 		output.worker.by = work::OrderBy::ModifiedTime;
 	}
+	// -n, --reverse
+	output.reverse = input.is_any(args![b'n', "reverse"]);
 	// -P, -p, --parallel cpus|<max>
 	// -1
 	if input.is_any(args![b'P', b'p', "parallel"]) {
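
Chaining the time-sort checks with `else if` means each argument can match at most one of `-a`/`-c`/`-m`, while a later flag on the command line still overrides an earlier one. A standalone illustration of that behaviour (plain string matching here, not the crate's `args!`/`is_any` machinery):

#[derive(Debug, Clone, Copy, Default)]
enum OrderBy { AccessTime, #[default] CreationTime, ModifiedTime }

fn apply_sort_flag(flag: &str, by: &mut OrderBy) {
    if flag == "-a" || flag == "--atime" {
        *by = OrderBy::AccessTime;
    } else if flag == "-c" || flag == "--ctime" {
        *by = OrderBy::CreationTime;
    } else if flag == "-m" || flag == "--mtime" {
        *by = OrderBy::ModifiedTime;
    }
}

fn main() {
    let mut by = OrderBy::default();
    for flag in ["-a", "--mtime"] {
        apply_sort_flag(flag, &mut by);
    }
    println!("{by:?}"); // ModifiedTime: the later flag overrode -a
}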

@@ -59,7 +59,7 @@ where I: futures::stream::Stream<Item = P>,
 fn print_help<W: ?Sized>(to: &mut W) -> std::io::Result<()>
 where W: std::io::Write,
 {
 	let execp = args::prog_name();
 	writeln!(to, "{} v{} - {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"), env!("CARGO_PKG_DESCRIPTION"))?;
@@ -74,8 +74,8 @@ fn print_help<W: ?Sized>(to: &mut W) -> std::io::Result<()>
 	macro_rules! write_opt {
 		($($name:literal),+ => $explain:literal $(, $format:expr)*) => {
 			{
-				let names = [$($name),+].into_iter().fold(String::default(), |prev, n| format!("{prev}, {n}"));
-				writeln!(to, concat!(" {}\t\t", $explain), names $(, $format)*)
+				let names = [$($name),+].into_iter().fold(String::default(), |prev, n| if prev.is_empty() { n.to_string() } else { format!("{prev}, {n}") });
+				writeln!(to, concat!(" {}\t", $explain), names $(, $format)*)
 			}
 		};
 	}
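
The `fold` change is easiest to see outside the macro: the old accumulator always prepended ", ", so the joined option names started with a stray ", "; checking `prev.is_empty()` keeps the separator between names only. A minimal reproduction of both behaviours:

fn join_old(names: &[&str]) -> String {
    // Old behaviour: inserts the separator even before the first name.
    names.iter().fold(String::default(), |prev, n| format!("{prev}, {n}"))
}

fn join_new(names: &[&str]) -> String {
    // New behaviour: only insert ", " once something is already accumulated.
    names.iter().fold(String::default(), |prev, n| {
        if prev.is_empty() { n.to_string() } else { format!("{prev}, {n}") }
    })
}

fn main() {
    assert_eq!(join_old(&["-n", "--reverse"]), ", -n, --reverse");
    assert_eq!(join_new(&["-n", "--reverse"]), "-n, --reverse");
    println!("{}", join_new(&["-a", "--atime"]));
}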
@@ -84,6 +84,7 @@ fn print_help<W: ?Sized>(to: &mut W) -> std::io::Result<()>
 	write_opt!("-a", "--atime" => "Sort by atime")?;
 	write_opt!("-c", "--ctime" => "Sort by ctime (default)")?;
 	write_opt!("-m", "--mtime" => "Sort by mtime")?;
+	write_opt!("-n", "--reverse" => "Print output in reverse")?;
 	write_opt!("-p", "--parallel cpus|<max tasks>" => "Run tasks in parallel, with a max number of tasks being equal `<max tasks>`, or, if 0, to infinity (see `-P`), if 'cpus', to the number of logical CPU cores ({}, default)", *walk::NUM_CPUS)?;
 	write_opt!("-P", "--parallel 0" => "Run tasks with unbounded parallelism, no limit to the number of walker tasks running at once (note: the physical thread pool will always be the same size regardless of these flags)")?;
 	write_opt!("-1", "--parallel 1" => "Only let one directory be processed at once")?;
@@ -123,24 +124,24 @@ async fn main() -> eyre::Result<()> {
 	debug!("Parsed args: {:?}", args);
 	let worker_cfg = {
-		//TODO: Read worker config from main config
-		std::sync::Arc::new(work::Config::default())
+		//Read worker config from main config
+		std::sync::Arc::new(args.worker.clone())
 	};
 	let walker_cfg = {
-		//TODO: Read walker config from main config
-		walk::Config::default()
+		//Read walker config from main config
+		args.walker.clone()
 	};
 	// Spin up ordering task.
 	let ordering = {
 		let cfg = (*worker_cfg).clone();
 		tokio::spawn(async move {
-			trace!("spun up ordering backing thread");
+			trace!("spun up ordering backing thread with config: {:?}", &cfg);
 			work_on(cfg, rx).await //TODO: Parse config from args
 		})
 	};
-	trace!("Starting recursive walk of input locations");
+	trace!("Starting recursive walk of input locations with config: {:?}", &walker_cfg);
 	//TODO: Trace directory trees from paths in `args` and/or `stdin` and pass results to `tx`
 	let walk = walk_paths(args.paths(), walker_cfg, &worker_cfg, tx);
 	tokio::pin!(walk);
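
The worker config is now built from the parsed arguments and shared through an `Arc`: the ordering task takes its own copy of the inner config, while the walker keeps borrowing the shared handle. A minimal std-only sketch of that ownership pattern (plain threads stand in for the tokio tasks, and `Config` for `work::Config`):

use std::sync::Arc;
use std::thread;

#[derive(Debug, Default, Clone)]
struct Config { note: &'static str } // stand-in for work::Config

fn main() {
    // Built from parsed arguments rather than Default::default(), as in this commit.
    let worker_cfg = Arc::new(Config { note: "from args" });

    // The ordering task takes its own copy of the inner config and moves it in...
    let cfg = (*worker_cfg).clone();
    let ordering = thread::spawn(move || {
        println!("ordering task sees: {cfg:?}");
    });

    // ...while the walker side keeps borrowing the shared Arc.
    println!("walker shares:      {:?}", &*worker_cfg);
    ordering.join().unwrap();
}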
@@ -175,15 +176,29 @@ async fn main() -> eyre::Result<()> {
 		let lock = std::io::stdout().lock();
 		std::io::BufWriter::new(lock)
 	};
-	trace!("Writing ordered results to stdout... (buffered, sync)");
-	for info in set.into_iter()
+	trace!("Writing ordered results to stdout... (buffered, sync, rev: {})", args.reverse);
+	#[inline]
+	fn operate_on<W: ?Sized, I>(stdout: &mut W, set: I) -> eyre::Result<()>
+	where W: Write,
+	      I: IntoIterator<Item = work::FileInfo> + ExactSizeIterator + DoubleEndedIterator + std::iter::FusedIterator + 'static
 	{
-		stdout.write_all(info.path().as_os_str().as_bytes())
-			.and_then(|_| stdout.write_all(&[b'\n']))
-			.wrap_err("Failed to write raw pathname for entry to stdout")
-			.with_context(|| format!("{:?}", info.path()).header("Pathname was"))?;
+		for info in set
+		{
+			stdout.write_all(info.path().as_os_str().as_bytes())
+				.and_then(|_| stdout.write_all(&[b'\n']))
+				.wrap_err("Failed to write raw pathname for entry to stdout")
+				.with_context(|| format!("{:?}", info.path()).header("Pathname was"))?;
+		}
+		Ok(())
 	}
+	if args.reverse {
+		operate_on(&mut stdout, set.into_iter().rev())
+	} else {
+		operate_on(&mut stdout, set.into_iter())
+	}.wrap_err("Abandoning output write due to failure")?;
 	stdout.flush().wrap_err("Failed to flush buffered output to stdout")?;
 	Ok(())
 }).await.wrap_err("Writer (blocking) task panic")?
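
A simplified, self-contained version of the `operate_on` idea above: one generic writer loop accepts any double-ended iterator, and the caller chooses the forward or `.rev()` form, so the body is not duplicated for `--reverse`. `Entry` stands in for `work::FileInfo`, and the eyre error wrapping is dropped for brevity:

use std::io::{self, Write};

struct Entry { path: String } // stand-in for work::FileInfo

fn operate_on<W, I>(out: &mut W, set: I) -> io::Result<()>
where W: Write + ?Sized,
      I: Iterator<Item = Entry> + DoubleEndedIterator,
{
    for e in set {
        out.write_all(e.path.as_bytes())?;
        out.write_all(b"\n")?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let set = vec![Entry { path: "a.txt".into() }, Entry { path: "b.txt".into() }];
    let reverse = true; // would come from args.reverse
    let mut stdout = io::BufWriter::new(io::stdout().lock());

    // The caller picks the direction; the writer loop itself stays unchanged.
    let result = if reverse {
        operate_on(&mut stdout, set.into_iter().rev())
    } else {
        operate_on(&mut stdout, set.into_iter())
    };
    result?;
    stdout.flush()
}

Binding the chosen branch to a local before applying `?` keeps the `if`/`else` in expression position in this sketch.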
