diff --git a/CHANGELOG.md b/CHANGELOG.md
index 56b6a9a..826eefd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 - Upcoming fixes will be listed here
 
+## [0.1.3] - 2025-08-19
+
+### Fixed
+- CLI crash on `--help` due to unsupported `Optional[typer.Context]` annotation. Refactored callback to require `Context` and added an internal wrapper for tests.
+
+### Changed
+- Documentation updates clarifying correct usage of global options and subcommands. Added examples for the `nl` subcommand and a note to avoid invoking the binary twice.
+
 ## [0.1.0] - 2024-01-XX
 
 ### Added
diff --git a/README.md b/README.md
index b813b1a..4f1c208 100644
--- a/README.md
+++ b/README.md
@@ -137,6 +137,22 @@ aiclip --timeout 120 "complex processing task"
 aiclip --verbose "your command"
 ```
 
+### Subcommands and option placement
+
+You can also use the explicit `nl` subcommand. Put global options before the subcommand:
+
+```bash
+aiclip --yes nl "thumbnail at 10s from test.mp4"
+aiclip --dry-run --model gpt-4o-mini nl "compress input.mp4"
+```
+
+Do not invoke the binary twice:
+
+```bash
+# Incorrect
+aiclip aiclip --yes nl "..."
+```
+
 ## 🔧 Configuration
 
 aiclip uses environment variables and `.env` files for configuration:
diff --git a/docs/usage.md b/docs/usage.md
index e3bb68c..f3cb9d8 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -10,3 +10,22 @@ Options:
 - `--yes`: skip confirmation
 - `--dry-run`: preview only
 - `--model gpt-4o-mini`: pick model
+
+## Subcommand mode
+
+Global options must come before the subcommand. The primary subcommand is `nl` (natural language):
+
+```bash
+# Run with confirmation skipped
+aiclip --yes nl "thumbnail at 10s from test.mp4"
+
+# Preview only and override model
+aiclip --dry-run --model gpt-4o-mini nl "compress input.mp4"
+```
+
+Avoid invoking the binary twice:
+
+```bash
+# Incorrect
+aiclip aiclip --yes nl "..."
+```
diff --git a/pyproject.toml b/pyproject.toml
index 561f63d..8bda090 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "ai-ffmpeg-cli"
-version = "0.1.2"
+version = "0.1.3"
 description = "AI-powered CLI that translates natural language to safe ffmpeg commands"
 readme = { file = "README.md", content-type = "text/markdown" }
 license = { file = "LICENSE" }
diff --git a/src/ai_ffmpeg_cli/main.py b/src/ai_ffmpeg_cli/main.py
index ae630c5..206c7ab 100644
--- a/src/ai_ffmpeg_cli/main.py
+++ b/src/ai_ffmpeg_cli/main.py
@@ -26,17 +26,14 @@ def _setup_logging(verbose: bool) -> None:
     logging.basicConfig(level=level, format="%(levelname)s: %(message)s")
 
 
-@app.callback()
-def main(
-    ctx: typer.Context | None = None,
-    prompt: str | None = typer.Argument(
-        None, help="Natural language prompt; if provided, runs once and exits"
-    ),
-    yes: bool = typer.Option(False, "--yes/--no-yes", help="Skip confirmation and overwrite"),
-    model: str | None = typer.Option(None, "--model", help="LLM model override"),
-    dry_run: bool = typer.Option(None, "--dry-run/--no-dry-run", help="Preview only"),
-    timeout: int = typer.Option(60, "--timeout", help="LLM timeout seconds"),
-    verbose: bool = typer.Option(False, "--verbose", help="Verbose logging"),
+def _main_impl(
+    ctx: typer.Context | None,
+    prompt: str | None,
+    yes: bool,
+    model: str | None,
+    dry_run: bool | None,
+    timeout: int,
+    verbose: bool,
 ) -> None:
     """Initialize global options and optionally run one-shot prompt."""
     _setup_logging(verbose)
@@ -84,6 +81,33 @@ def main(
     raise typer.Exit(1) from e
 
 
+@app.callback()
+def cli_main(
+    ctx: typer.Context,
+    prompt: str | None = typer.Argument(
+        None, help="Natural language prompt; if provided, runs once and exits"
+    ),
+    yes: bool = typer.Option(False, "--yes/--no-yes", help="Skip confirmation and overwrite"),
+    model: str | None = typer.Option(None, "--model", help="LLM model override"),
+    dry_run: bool = typer.Option(None, "--dry-run/--no-dry-run", help="Preview only"),
+    timeout: int = typer.Option(60, "--timeout", help="LLM timeout seconds"),
+    verbose: bool = typer.Option(False, "--verbose", help="Verbose logging"),
+) -> None:
+    _main_impl(ctx, prompt, yes, model, dry_run, timeout, verbose)
+
+
+def main(
+    ctx: typer.Context | None = None,
+    prompt: str | None = None,
+    yes: bool = False,
+    model: str | None = None,
+    dry_run: bool | None = None,
+    timeout: int = 60,
+    verbose: bool = False,
+) -> None:
+    _main_impl(ctx, prompt, yes, model, dry_run, timeout, verbose)
+
+
 def _make_llm(cfg: AppConfig) -> LLMClient:
     if not cfg.openai_api_key:
         raise ConfigError(
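The refactor above splits the Typer callback (`cli_main`) from a plain `main` wrapper so the CLI surface can be exercised without Typer's parameter parsing. A minimal regression-test sketch for the `--help` crash, assuming a standard pytest layout and that `ai_ffmpeg_cli.main` exports `app` as shown in the diff (the test module and function names are illustrative):

```python
# test_cli_help.py -- illustrative regression test for the 0.1.3 --help fix.
# Assumes the package is installed and `app` is the Typer app from ai_ffmpeg_cli.main.
from typer.testing import CliRunner

from ai_ffmpeg_cli.main import app

runner = CliRunner()


def test_help_renders_without_crashing() -> None:
    # Before 0.1.3, rendering help raised because the callback annotated
    # ctx as Optional[typer.Context]; cli_main now requires Context.
    result = runner.invoke(app, ["--help"])
    assert result.exit_code == 0
    assert "Usage" in result.output
```

Typer's `CliRunner` drives the real callback, so this covers the code path that previously failed while building the help output; direct keyword-argument calls for unit tests can go through the plain `main` wrapper instead.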