Release: 0.1.3 – fix Typer Context crash; update docs on usage/subcommands

This commit is contained in:
d-k-patel
2025-08-19 16:13:35 +05:30
parent 54cc7252d6
commit e2bf5f423b
5 changed files with 79 additions and 12 deletions

View File

@@ -16,6 +16,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
- Upcoming fixes will be listed here
## [0.1.3] - 2025-08-19
### Fixed
- CLI crash on `--help` due to unsupported `Optional[typer.Context]` annotation. Refactored callback to require `Context` and added an internal wrapper for tests.
### Changed
- Documentation updates clarifying correct usage of global options and subcommands. Added examples for `nl` subcommand and a note to avoid invoking the binary twice.
## [0.1.0] - 2024-01-XX <!-- TODO(release): replace the "XX" placeholder with the actual 0.1.0 release date -->
### Added

View File

@@ -137,6 +137,22 @@ aiclip --timeout 120 "complex processing task"
aiclip --verbose "your command"
```
### Subcommands and option placement
You can also use the explicit `nl` subcommand. Put global options before the subcommand:
```bash
aiclip --yes nl "thumbnail at 10s from test.mp4"
aiclip --dry-run --model gpt-4o-mini nl "compress input.mp4"
```
Do not invoke the binary twice:
```bash
# Incorrect
aiclip aiclip --yes nl "..."
```
## 🔧 Configuration
aiclip uses environment variables and `.env` files for configuration:

View File

@@ -10,3 +10,22 @@ Options:
- `--yes`: skip confirmation
- `--dry-run`: preview only
- `--model gpt-4o-mini`: pick model
## Subcommand mode
Global options must come before the subcommand. The primary subcommand is `nl` (natural language):
```bash
# Run with confirmation skipped
aiclip --yes nl "thumbnail at 10s from test.mp4"
# Preview only and override model
aiclip --dry-run --model gpt-4o-mini nl "compress input.mp4"
```
Avoid invoking the binary twice:
```bash
# Incorrect
aiclip aiclip --yes nl "..."
```

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "ai-ffmpeg-cli"
version = "0.1.2"
version = "0.1.3"
description = "AI-powered CLI that translates natural language to safe ffmpeg commands"
readme = { file = "README.md", content-type = "text/markdown" }
license = { file = "LICENSE" }

View File

@@ -26,17 +26,14 @@ def _setup_logging(verbose: bool) -> None:
logging.basicConfig(level=level, format="%(levelname)s: %(message)s")
@app.callback()
def main(
ctx: typer.Context | None = None,
prompt: str | None = typer.Argument(
None, help="Natural language prompt; if provided, runs once and exits"
),
yes: bool = typer.Option(False, "--yes/--no-yes", help="Skip confirmation and overwrite"),
model: str | None = typer.Option(None, "--model", help="LLM model override"),
dry_run: bool = typer.Option(None, "--dry-run/--no-dry-run", help="Preview only"),
timeout: int = typer.Option(60, "--timeout", help="LLM timeout seconds"),
verbose: bool = typer.Option(False, "--verbose", help="Verbose logging"),
def _main_impl(
ctx: typer.Context | None,
prompt: str | None,
yes: bool,
model: str | None,
dry_run: bool | None,
timeout: int,
verbose: bool,
) -> None:
"""Initialize global options and optionally run one-shot prompt."""
_setup_logging(verbose)
@@ -84,6 +81,33 @@ def main(
raise typer.Exit(1) from e
@app.callback()
def cli_main(
    ctx: typer.Context,
    prompt: str | None = typer.Argument(
        None, help="Natural language prompt; if provided, runs once and exits"
    ),
    yes: bool = typer.Option(False, "--yes/--no-yes", help="Skip confirmation and overwrite"),
    model: str | None = typer.Option(None, "--model", help="LLM model override"),
    # None (rather than False) distinguishes "flag not given" from an explicit
    # --no-dry-run, so the annotation is Optional to match the default.
    dry_run: bool | None = typer.Option(None, "--dry-run/--no-dry-run", help="Preview only"),
    timeout: int = typer.Option(60, "--timeout", help="LLM timeout seconds"),
    verbose: bool = typer.Option(False, "--verbose", help="Verbose logging"),
) -> None:
    """Typer app callback: parse global options and delegate to `_main_impl`.

    `ctx` is required (not Optional) because Typer cannot build help output
    for an `Optional[typer.Context]` annotation — the 0.1.3 crash fix.
    All argument/option values are forwarded positionally to `_main_impl`.
    """
    _main_impl(ctx, prompt, yes, model, dry_run, timeout, verbose)
def main(
    ctx: typer.Context | None = None,
    prompt: str | None = None,
    yes: bool = False,
    model: str | None = None,
    dry_run: bool | None = None,
    timeout: int = 60,
    verbose: bool = False,
) -> None:
    """Plain-Python entry point mirroring the CLI callback.

    Lets tests (and embedders) drive the app logic without going through
    Typer's parameter parsing; every value is simply forwarded to
    ``_main_impl``, using plain defaults in place of Typer option objects.
    """
    _main_impl(
        ctx,
        prompt,
        yes,
        model,
        dry_run,
        timeout,
        verbose,
    )
def _make_llm(cfg: AppConfig) -> LLMClient:
if not cfg.openai_api_key:
raise ConfigError(