diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index e7e921e1e9..2b02ff3e1b 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -8,18 +8,23 @@ Workflows automate multi-step Spec-Driven Development processes — chaining com specify workflow run ``` -| Option | Description | -| ------------------- | -------------------------------------------------------- | -| `-i` / `--input` | Pass input values as `key=value` (repeatable) | +| Option | Description | +| ------------------- | ------------------------------------------------------------------------------------------------ | +| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); `key=@path` reads an existing text file, otherwise `@` values stay literal | +| `--input-file` | Load workflow inputs/parameters from a JSON object file with string, number, or boolean values; repeatable `--input` values override file values | -Runs a workflow from a catalog ID, URL, or local file path. Inputs declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or `--input-file`, or will be prompted interactively. Example: ```bash -specify workflow run speckit -i spec="Build a kanban board with drag-and-drop task management" -i scope=full +specify workflow run ./workflow.yml -i prompt="Build a workflow" -i scope=full +specify workflow run ./workflow.yml --input prompt=@docs/prompt.md +specify workflow run ./workflow.yml --input-file payload.json -i scope=full ``` +For boolean, number, and enum-constrained inputs, surrounding whitespace from file-backed string values is trimmed before normal workflow input coercion. Free-form string inputs preserve file contents. + > **Note:** All workflow commands require a project already initialized with `specify init`. 
## Resume a Workflow diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index ccd670d20e..78f18cac33 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -34,6 +34,7 @@ import shutil import json import json5 +import math import stat import shlex import urllib.error @@ -5257,11 +5258,163 @@ def extension_set_priority( workflow_app.add_typer(workflow_catalog_app, name="catalog") +def _resolve_workflow_cli_path(raw_path: str) -> Path: + """Resolve workflow CLI file paths from the current working directory.""" + path = Path(raw_path).expanduser() + if not path.is_absolute(): + path = Path.cwd() / path + return path + + +def _read_workflow_cli_file(raw_path: str, description: str) -> tuple[Path, str]: + """Read a text file referenced by a workflow CLI input option.""" + cleaned_path = raw_path.strip() + if not cleaned_path: + raise ValueError(f"Missing file path for {description}.") + + path = _resolve_workflow_cli_path(cleaned_path) + if not path.exists(): + raise ValueError(f"File for {description} not found: {path}") + if not path.is_file(): + raise ValueError(f"Path for {description} is not a file: {path}") + + try: + return path, path.read_text(encoding="utf-8") + except UnicodeDecodeError as exc: + raise ValueError( + f"Unable to read file for {description} as UTF-8 text: {path}" + ) from exc + except OSError as exc: + raise ValueError( + f"Unable to read file for {description}: {path} ({exc})" + ) from exc + + +def _json_type_name(value: Any) -> str: + """Return a user-facing JSON type name for validation errors.""" + if value is None: + return "null" + if isinstance(value, dict): + return "object" + if isinstance(value, list): + return "array" + if isinstance(value, bool): + return "boolean" + if isinstance(value, (int, float)): + return "number" + if isinstance(value, str): + return "string" + return type(value).__name__ + + +def _validate_workflow_input_file_value(key: str, value: Any) -> None: + """Ensure 
--input-file values match the supported workflow input scalars.""" + if isinstance(value, float) and not math.isfinite(value): + raise ValueError( + f"--input-file value for {key!r} must be a finite number." + ) + if not isinstance(value, (str, int, float, bool)): + raise ValueError( + f"--input-file value for {key!r} must be a string, number, " + f"or boolean, got {_json_type_name(value)}." + ) + + +def _load_workflow_input_file(input_file: str) -> dict[str, Any]: + """Load workflow inputs from a JSON object file.""" + path, raw_json = _read_workflow_cli_file(input_file, "--input-file") + try: + data = json.loads(raw_json) + except json.JSONDecodeError as exc: + raise ValueError( + f"Invalid JSON in --input-file {path}: " + f"{exc.msg} at line {exc.lineno}, column {exc.colno}" + ) from exc + + if not isinstance(data, dict): + raise ValueError( + f"--input-file must contain a JSON object, got {type(data).__name__}." + ) + for key, value in data.items(): + _validate_workflow_input_file_value(str(key), value) + return data + + +def _normalize_workflow_cli_scalar( + value: Any, + input_def: dict[str, Any] | None, +) -> Any: + """Normalize file-backed scalars when workflow coercion expects scalars.""" + if not isinstance(value, str) or not isinstance(input_def, dict): + return value + + input_type = input_def.get("type", "string") + if input_type in ("number", "boolean") or input_def.get("enum") is not None: + return value.strip() + return value + + +def _parse_workflow_inputs( + input_values: list[str] | None, + input_file: str | None, + input_definitions: dict[str, Any] | None = None, +) -> dict[str, Any]: + """Normalize workflow CLI input options into the engine input dict.""" + inputs: dict[str, Any] = {} + input_definitions = input_definitions or {} + + if input_file is not None: + for key, value in _load_workflow_input_file(input_file).items(): + inputs[key] = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) + + if input_values: + for kv 
in input_values: + if "=" not in kv: + raise ValueError( + f"Invalid input format: {kv!r} (expected key=value)" + ) + key, _, raw_value = kv.partition("=") + key = key.strip() + if not key: + raise ValueError( + f"Invalid input format: {kv!r} (key cannot be empty)" + ) + + value = raw_value.strip() + if value.startswith("@"): + file_ref = value[1:].strip() + if file_ref: + candidate_path = _resolve_workflow_cli_path(file_ref) + if candidate_path.exists() and candidate_path.is_file(): + _, value = _read_workflow_cli_file( + file_ref, f"input {key!r}" + ) + value = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) + inputs[key] = value + + return inputs + + @workflow_app.command("run") def workflow_run( source: str = typer.Argument(..., help="Workflow ID or YAML file path"), input_values: list[str] | None = typer.Option( - None, "--input", "-i", help="Input values as key=value pairs" + None, + "--input", + "-i", + help=( + "Input values as key=value pairs; key=@path reads an existing text " + "file, otherwise @ values stay literal" + ), + ), + input_file: str | None = typer.Option( + None, "--input-file", help="Load input values from a JSON object file" ), ): """Run a workflow from an installed ID or local YAML path.""" @@ -5288,15 +5441,11 @@ def workflow_run( console.print(f" • {err}") raise typer.Exit(1) - # Parse inputs - inputs: dict[str, Any] = {} - if input_values: - for kv in input_values: - if "=" not in kv: - console.print(f"[red]Error:[/red] Invalid input format: {kv!r} (expected key=value)") - raise typer.Exit(1) - key, _, value = kv.partition("=") - inputs[key.strip()] = value.strip() + try: + inputs = _parse_workflow_inputs(input_values, input_file, definition.inputs) + except ValueError as exc: + console.print(f"[red]Error:[/red] {exc}") + raise typer.Exit(1) console.print(f"\n[bold cyan]Running workflow:[/bold cyan] {definition.name} ({definition.id})") console.print(f"[dim]Version: {definition.version}[/dim]\n") diff --git 
# ===== Workflow CLI Input Tests =====

class TestWorkflowCliInputs:
    """Exercise ``workflow run`` input normalization at the CLI boundary."""

    def test_inline_input_still_works(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        monkeypatch.chdir(project_dir)

        inputs = _parse_workflow_inputs(
            ["spec=Build a kanban board", "scope=full"], None
        )

        assert inputs == {"spec": "Build a kanban board", "scope": "full"}

    def test_at_file_input_reads_file_contents_for_generic_key(
        self, project_dir, monkeypatch
    ):
        from specify_cli import _parse_workflow_inputs

        desc_text = "# Description\n\nBuild a workflow.\n"
        (project_dir / "desc.md").write_text(desc_text, encoding="utf-8")
        monkeypatch.chdir(project_dir)

        assert _parse_workflow_inputs(["description=@desc.md"], None) == {
            "description": desc_text
        }

    def test_at_file_input_normalizes_typed_scalars(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "enabled.txt").write_text("true\n", encoding="utf-8")
        (project_dir / "scope.txt").write_text("full\n", encoding="utf-8")
        (project_dir / "notes.md").write_text("line one\n", encoding="utf-8")
        monkeypatch.chdir(project_dir)

        inputs = _parse_workflow_inputs(
            ["enabled=@enabled.txt", "scope=@scope.txt", "notes=@notes.md"],
            None,
            {
                "enabled": {"type": "boolean"},
                "scope": {"type": "string", "enum": ["full", "minimal"]},
                "notes": {"type": "string"},
            },
        )

        # Typed/enum inputs get trimmed; free-form strings keep the newline.
        assert inputs == {
            "enabled": "true",
            "scope": "full",
            "notes": "line one\n",
        }

    @pytest.mark.parametrize("literal", ["@alice", "@"])
    def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        monkeypatch.chdir(project_dir)

        assert _parse_workflow_inputs([f"assignee={literal}"], None) == {
            "assignee": literal
        }

    def test_existing_at_directory_stays_literal(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "some_existing_directory").mkdir()
        monkeypatch.chdir(project_dir)

        assert _parse_workflow_inputs(["x=@."], None) == {"x": "@."}
        assert _parse_workflow_inputs(["x=@some_existing_directory"], None) == {
            "x": "@some_existing_directory"
        }

    def test_missing_input_file_fails_cleanly(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="not found"):
            _parse_workflow_inputs(None, "missing.json")

    def test_input_file_directory_fails_cleanly(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").mkdir()
        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="not a file"):
            _parse_workflow_inputs(None, "payload.json")

    def test_input_file_loads_json_object(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(
            json.dumps({"prompt": "Build a workflow", "scope": "full"}),
            encoding="utf-8",
        )
        monkeypatch.chdir(project_dir)

        assert _parse_workflow_inputs(None, "payload.json") == {
            "prompt": "Build a workflow",
            "scope": "full",
        }

    def test_input_file_normalizes_typed_string_scalars(
        self, project_dir, monkeypatch
    ):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(
            json.dumps({
                "enabled": "true\n",
                "scope": "full\n",
                "prompt": "Keep trailing newline\n",
            }),
            encoding="utf-8",
        )
        monkeypatch.chdir(project_dir)

        inputs = _parse_workflow_inputs(
            None,
            "payload.json",
            {
                "enabled": {"type": "boolean"},
                "scope": {"type": "string", "enum": ["full", "minimal"]},
                "prompt": {"type": "string"},
            },
        )

        assert inputs == {
            "enabled": "true",
            "scope": "full",
            "prompt": "Keep trailing newline\n",
        }

    def test_direct_input_overrides_input_file(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(
            json.dumps({"prompt": "Build a workflow", "scope": "full"}),
            encoding="utf-8",
        )
        monkeypatch.chdir(project_dir)

        assert _parse_workflow_inputs(["scope=minimal"], "payload.json") == {
            "prompt": "Build a workflow",
            "scope": "minimal",
        }

    def test_invalid_json_input_file_fails_cleanly(self, project_dir, monkeypatch):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text("{invalid json", encoding="utf-8")
        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="Invalid JSON"):
            _parse_workflow_inputs(None, "payload.json")

    @pytest.mark.parametrize("payload", ["[]", '"not an object"'])
    def test_non_object_json_input_file_fails_cleanly(
        self, payload, project_dir, monkeypatch
    ):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(payload, encoding="utf-8")
        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="JSON object"):
            _parse_workflow_inputs(None, "payload.json")

    @pytest.mark.parametrize(
        "payload",
        [
            {"spec": {"text": "Build a workflow"}},
            {"spec": ["Build a workflow"]},
            {"spec": None},
        ],
    )
    def test_non_scalar_json_input_file_values_fail_cleanly(
        self, payload, project_dir, monkeypatch
    ):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(
            json.dumps(payload), encoding="utf-8"
        )
        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="string, number, or boolean"):
            _parse_workflow_inputs(None, "payload.json")

    @pytest.mark.parametrize(
        "payload",
        ['{"spec": NaN}', '{"spec": Infinity}', '{"spec": 1e999}'],
    )
    def test_non_finite_json_input_file_numbers_fail_cleanly(
        self, payload, project_dir, monkeypatch
    ):
        from specify_cli import _parse_workflow_inputs

        (project_dir / "payload.json").write_text(payload, encoding="utf-8")
        monkeypatch.chdir(project_dir)

        with pytest.raises(ValueError, match="finite number"):
            _parse_workflow_inputs(None, "payload.json")

    def test_malformed_inline_input_fails_cleanly(self):
        from specify_cli import _parse_workflow_inputs

        with pytest.raises(ValueError, match="expected key=value"):
            _parse_workflow_inputs(["spec"], None)

    def test_workflow_run_passes_normalized_inputs_to_engine(
        self, project_dir, monkeypatch
    ):
        from typer.testing import CliRunner
        from specify_cli import app
        from specify_cli.workflows import engine as engine_module

        (project_dir / "payload.json").write_text(
            json.dumps({"spec": "Build a kanban board", "scope": "minimal"}),
            encoding="utf-8",
        )
        captured: dict[str, object] = {}

        # Minimal stand-ins for the engine's workflow objects.
        class FakeDefinition:
            id = "speckit"
            name = "Spec Kit"
            version = "1.0.0"
            inputs = {}

        class FakeStatus:
            value = "completed"

        class FakeState:
            status = FakeStatus()
            run_id = "run-1"

        class FakeWorkflowEngine:
            def __init__(self, project_root):
                self.project_root = project_root
                self.on_step_start = None

            def load_workflow(self, source):
                captured["source"] = source
                return FakeDefinition()

            def validate(self, definition):
                return []

            def execute(self, definition, inputs):
                captured["inputs"] = inputs
                return FakeState()

        monkeypatch.setattr(engine_module, "WorkflowEngine", FakeWorkflowEngine)
        monkeypatch.chdir(project_dir)

        result = CliRunner().invoke(
            app,
            [
                "workflow",
                "run",
                "speckit",
                "--input-file",
                "payload.json",
                "--input",
                "scope=full",
            ],
        )

        assert result.exit_code == 0, result.output
        assert captured["source"] == "speckit"
        # --input overrides the --input-file value for the same key.
        assert captured["inputs"] == {
            "spec": "Build a kanban board",
            "scope": "full",
        }


# ===== Step Registry Tests =====