diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index e6184751..00000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.gitignore b/.gitignore index 3578e26f..2c94c44a 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,8 @@ docs/** !docs/command-overview.md !docs/qoder-directory.md +.github/copilot-instructions.md + # Generated by cargo mutants # Contains mutation testing data **/mutants.out*/ @@ -44,4 +46,12 @@ syncable-ide-companion/*.vsix syncable-ide-companion/node_modules/ syncable-ide-companion/dist/ -syncable-cli.tape \ No newline at end of file +.DS_Store +**/.DS_Store + +# Generated CD/CI pipeline output (sync-ctl generate cd/ci) +.github/workflows/deploy-azure.yml +.github/workflows/deploy-gcp.yml +.github/workflows/deploy-hetzner.yml +.syncable/cd-manifest.toml +.syncable/SECRETS_REQUIRED.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cd90203..67303e0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,97 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased] +## [0.37.1](https://github.com/syncable-dev/syncable-cli/compare/v0.37.0...v0.37.1) - 2026-03-29 + +### Added + +- deprecate chat/agent commands, rebrand CLI as DevOps toolbox +- updated codex/gemini skill path + +### Fixed + +- install Codex skills to ~/.agents/skills/ per official docs +- rewrite Gemini CLI skill installer to use proper SKILL.md directory format +- rewrite skill descriptions for semantic matching, skip CI for non-Rust changes + +### Other + +- release v0.37.0 +- *(installer)* update README with correct install paths for Claude, Codex, Gemini + +## [0.37.1](https://github.com/syncable-dev/syncable-cli/compare/v0.37.0...v0.37.1) - 2026-03-29 + +### Added + +- deprecate chat/agent commands, rebrand CLI as DevOps toolbox +- updated codex/gemini skill path + +### Fixed + +- install Codex skills to ~/.agents/skills/ per official docs +- rewrite Gemini CLI skill installer to use proper SKILL.md directory 
format +- rewrite skill descriptions for semantic matching, skip CI for non-Rust changes + +### Other + +- *(installer)* update README with correct install paths for Claude, Codex, Gemini + +## [0.37.0](https://github.com/syncable-dev/syncable-cli/compare/v0.36.0...v0.37.0) - 2026-03-28 + +### Added + +- wire validate command, fix per-directory vuln/dep scanning, add deploy preview/run, and pagination +- updating test cases +- removed .env +- claude skills feature +- rewrite command skills to use --agent flag +- rewrite workflow skills with --agent and cross-step retrieval +- wire --agent flag in command handlers and add Retrieve command +- add --agent flag to 5 scan commands and Retrieve subcommand +- add CLI variants of compression functions +- add resolve_latest() for cross-process ref_id resolution +- *(installer)* add CLI entrypoint with commander, inquirer, ora, chalk +- *(installer)* add update command (re-exports uninstall + install) +- *(installer)* add status command with per-agent skill counting +- *(installer)* add uninstall command with glob removal and Gemini marker cleanup +- *(installer)* add install command with skill writers for all 5 agents +- *(installer)* add prerequisite check and installation modules +- *(installer)* add Cursor, Windsurf, and Gemini format transformers +- *(installer)* add Claude and Codex format transformers +- *(installer)* add agent detection for 5 AI coding agents +- *(installer)* add skill loader with frontmatter parsing +- *(installer)* add constants and utils module with version parsing +- *(skills)* add syncable-deploy-pipeline workflow skill +- *(skills)* add syncable-iac-pipeline workflow skill +- *(skills)* add syncable-security-audit workflow skill +- *(skills)* add syncable-project-assessment workflow skill +- *(skills)* add syncable-platform command skill +- *(skills)* add syncable-optimize command skill +- *(skills)* add syncable-validate command skill +- *(skills)* add syncable-dependencies command skill +- 
*(skills)* add syncable-vulnerabilities command skill +- *(skills)* add syncable-security command skill +- *(skills)* add syncable-analyze command skill +- early agu-ui protocol added + +### Fixed + +- add failures/diagnostics fields to find_issues_array +- *(installer)* add verbose logging, forward all flags in update command + +### Other + +- ignore 6 new transitive dependency advisories (aws-lc-sys, rustls-webpki) +- agent output pipeline implementation plan (10 tasks) +- agent output pipeline design spec +- *(installer)* add professional npm README with logo and metadata +- *(installer)* scaffold npx installer project +- add npx installer implementation plan +- add npx installer design spec +- scaffold skills directory structure +- add syncable CLI skills implementation plan +- add syncable CLI skills design spec + ## [0.36.0](https://github.com/syncable-dev/syncable-cli/compare/v0.35.1...v0.36.0) - 2026-03-15 ### Fixed diff --git a/Cargo.lock b/Cargo.lock index 8f5435e7..d3514074 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -966,7 +966,7 @@ version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf9468729b8cbcea668e36183cb69d317348c2e08e994829fb56ebfdfbaac34" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1423,7 +1423,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1564,7 +1564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3084,7 +3084,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.2", "system-configuration", "tokio", "tower-service", @@ -3300,6 +3300,19 @@ dependencies = [ "unicode-width 0.2.2", ] +[[package]] +name = "insta" +version = "1.47.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4a6248eb93a4401ed2f37dfe8ea592d3cf05b7cf4f8efa867b6895af7e094e" +dependencies = [ + "console", + "once_cell", + "serde", + "similar", + "tempfile", +] + [[package]] name = "io-close" version = "0.3.7" @@ -3343,7 +3356,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3380,7 +3393,7 @@ dependencies = [ "portable-atomic", "portable-atomic-util", "serde_core", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4449,7 +4462,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls 0.23.36", - "socket2 0.5.10", + "socket2 0.6.2", "thiserror 2.0.18", "tokio", "tracing", @@ -4487,9 +4500,9 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -4883,7 +4896,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4954,7 +4967,7 @@ dependencies = [ "security-framework", "security-framework-sys", "webpki-root-certs", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5517,7 +5530,7 @@ dependencies = [ [[package]] name = "syncable-cli" -version = "0.36.0" +version = "0.37.1" dependencies = [ "ahash", "aho-corasick", @@ -5545,6 +5558,7 @@ dependencies = [ "http 1.4.0", "indicatif", "inquire", + "insta", "k8s-openapi", "kube", "log", @@ -5684,7 +5698,7 @@ dependencies = [ "getrandom 0.4.1", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6577,7 +6591,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/Cargo.toml 
b/Cargo.toml index acebde36..33bbaa36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "syncable-cli" -version = "0.36.0" +version = "0.37.1" edition = "2024" rust-version = "1.88" # MSRV - AWS SDK requires 1.88 authors = ["Syncable Team"] @@ -125,6 +125,7 @@ assert_cmd = "2" predicates = "3" tempfile = "3" proptest = "1" +insta = { version = "1", features = ["yaml"] } # Fast debug builds - prioritize compile speed over runtime [profile.dev] diff --git a/README.md b/README.md index 0f1f0be4..9dd1dc23 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,20 @@ This installs 11 skills (7 command + 4 workflow) into your AI coding agent. Then ```bash cargo install syncable-cli sync-ctl analyze . + +# Generate a CI pipeline skeleton (GitHub Actions, Azure Pipelines, or Cloud Build) +sync-ctl generate ci . --platform gcp --dry-run # preview without writing files +sync-ctl generate ci . --platform azure # write azure-pipelines.yml +sync-ctl generate ci . --platform hetzner --notify # with Slack failure alert + +# Generate a CD pipeline skeleton +sync-ctl generate cd . --platform gcp --target cloud-run --dry-run +sync-ctl generate cd . --platform azure --target aks -o ./pipelines +sync-ctl generate cd . --platform hetzner --target vps --notify + +# Generate both CI + CD in one shot +sync-ctl generate ci-cd . --platform gcp --target cloud-run --dry-run +sync-ctl generate ci-cd . 
--platform hetzner --target vps --notify ``` ## šŸ¤– AI Agent Skills diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 00000000..9ad5abfc --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,58 @@ +trigger: + branches: + include: + - main + tags: + include: + - v* +pr: + branches: + include: + - main +schedules: +- cron: '{{CRON_SCHEDULE}}' + displayName: Scheduled build + branches: + include: + - main + always: true +pool: + vmImage: ubuntu-latest +steps: +- script: rustup default stable + displayName: Set up runtime +- task: Cache@2 + displayName: Cache dependencies + inputs: + key: cargo | $(Agent.OS) | **/Cargo.lock + path: |- + ~/.cargo/registry/index + ~/.cargo/registry/cache + ~/.cargo/git/db + target/ + restoreKeys: cargo | $(Agent.OS) +- script: cargo fetch + displayName: Install dependencies +- script: cargo clippy -- -D warnings + displayName: Lint +- script: cargo test + displayName: Test +- script: cargo build --release + displayName: Build +- script: docker build -t {{REGISTRY_URL}}/{{IMAGE_NAME}}:$(Build.SourceVersion) . + displayName: Build Docker image +- script: |- + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin + trivy image --exit-code 1 --severity CRITICAL,HIGH --format sarif --output trivy-results.sarif {{REGISTRY_URL}}/{{IMAGE_NAME}}:$(Build.SourceVersion) + displayName: Scan image (Trivy) +- script: |- + curl -sSfL https://github.com/gitleaks/gitleaks/releases/latest/download/gitleaks_linux_x64.tar.gz | tar xz -C /usr/local/bin + gitleaks detect --source . 
--exit-code 1 + displayName: Secret scan (Gitleaks) + env: + GITHUB_TOKEN: $(GITHUB_TOKEN) +- task: PublishBuildArtifacts@1 + displayName: Upload artifact + inputs: + artifactName: syncable-cli + pathToPublish: target/release/ diff --git a/docs/command-overview.md b/docs/command-overview.md index 2e257e0e..986dfe68 100644 --- a/docs/command-overview.md +++ b/docs/command-overview.md @@ -7,7 +7,10 @@ This document provides a comprehensive reference for all Syncable CLI commands, - [Global Options](#-global-options) - [Commands](#-commands) - [analyze](#1-sync-ctl-analyze) - Project analysis - - [generate](#2-sync-ctl-generate) - IaC generation + - [generate iac](#2-sync-ctl-generate) - IaC generation (Dockerfile, Compose, Terraform) + - [generate ci](#2b-sync-ctl-generate-ci-project_path) - CI pipeline generation + - [generate cd](#2c-sync-ctl-generate-cd-project_path) - CD pipeline generation + - [generate ci-cd](#2d-sync-ctl-generate-ci-cd-project_path) - Combined CI+CD generation - [validate](#3-sync-ctl-validate) - IaC validation (planned) - [support](#4-sync-ctl-support) - Show supported tech - [dependencies](#5-sync-ctl-dependencies) - Dependency analysis @@ -164,6 +167,239 @@ sync-ctl generate . --all --force --- +### 2b. `sync-ctl generate ci ` + +Generate a CI pipeline skeleton for GitHub Actions, Azure Pipelines, or Google Cloud Build from automatic project analysis. The command detects the language, runtime version, package manager, test framework, linter, build script, and Dockerfile presence — then produces a ready-to-use YAML file with `{{PLACEHOLDER}}` tokens only where values cannot be inferred from project files. 
+ +**Arguments:** +- `` — Path to the project directory to analyse + +**Options:** + +| Flag | Short | Description | +|------|-------|-------------| +| `--platform ` | | Target cloud platform (required) | +| `--format ` | | Override the default format for the chosen platform | +| `--dry-run` | | Print generated YAML to stdout; do not write any files | +| `--output ` | `-o` | Write files to this directory instead of the project root | +| `--env-prefix ` | | Prefix for secret/env variable names (default: `APP`) | +| `--skip-docker` | | Omit Docker build steps even when a Dockerfile is detected | +| `--notify` | | Append a Slack failure-notification step (requires `SLACK_BOT_TOKEN` and `SLACK_CHANNEL_ID` secrets) | + +**Platform defaults:** + +| Platform | Default format | Pipeline file written | +|----------|---------------|----------------------| +| `azure` | `azure-pipelines` | `azure-pipelines.yml` | +| `gcp` | `cloud-build` | `cloudbuild.yaml` | +| `hetzner` | `github-actions` | `.github/workflows/ci.yml` | + +**CI steps generated (canonical order):** + +| Step | Condition | +|------|-----------| +| Trigger config (push/PR branches, optional tag trigger) | Always | +| Checkout | Always | +| Runtime setup (language version) | Always | +| Dependency cache | Only when a lock file is detected | +| Install | Always | +| Lint | Only when a linter config file is detected | +| Test + coverage | Always | +| Build | Only when a build command is detected | +| Docker build | Only when `has_dockerfile = true` and `--skip-docker` is not set | +| Container image scan (Trivy) | Only when Docker build is present | +| Secret leak scan (Gitleaks) | Always | +| Artifact upload | Only when a build artifact path is known | +| Slack notify on failure | Only with `--notify` | + +**Output files:** + +``` +.github/workflows/ci.yml (GitHub Actions) +azure-pipelines.yml (Azure Pipelines) +cloudbuild.yaml (Cloud Build) +.syncable/SECRETS_REQUIRED.md (all platforms — secret setup 
instructions) +``` + +**Examples:** + +```bash +# Preview a GitHub Actions pipeline for a GCP-hosted project +sync-ctl generate ci . --platform gcp --dry-run + +# Write Azure Pipelines config +sync-ctl generate ci . --platform azure + +# Cloud Build with Slack notifications +sync-ctl generate ci . --platform gcp --notify + +# Skip Docker steps, custom output directory +sync-ctl generate ci . --platform hetzner --skip-docker --output ./ci/ + +# Custom secret prefix (secrets become MY_APP_REGISTRY_URL etc.) +sync-ctl generate ci . --platform azure --env-prefix MY_APP +``` + +**Unresolved tokens:** When a value cannot be inferred (e.g. no `.nvmrc` exists for a Node project), the generated YAML contains a `{{PLACEHOLDER}}` and `.syncable/SECRETS_REQUIRED.md` lists what needs to be filled in. + +**Status:** āœ… Implemented (EPIC 1 complete) + +--- + +### 2c. `sync-ctl generate cd ` + +Generate a CD (Continuous Deployment) pipeline skeleton for GitHub Actions from automatic project analysis. Detects Dockerfile presence, Terraform and Helm charts, migration tooling, and existing Kubernetes manifests — then produces a deployment workflow with environment gates, health checks, rollback comments, and a `SECRETS_REQUIRED.md` listing every credential needed. 
+ +**Arguments:** +- `` — Path to the project directory to analyse (default: `.`) + +**Options:** + +| Flag | Short | Description | +|------|-------|-------------| +| `--platform ` | | Target cloud platform (required) | +| `--target ` | | Specific deploy target within the platform (see table below) | +| `--registry ` | | Container registry override (defaults per platform) | +| `--image-name ` | | Docker image name (defaults to project directory name) | +| `--dry-run` | | Print generated YAML to stdout; do not write any files | +| `--output ` | `-o` | Write files to this directory instead of the project root | +| `--force` | | Overwrite existing pipeline files | + +**Platform targets:** + +| Platform | Target | Deploy mechanism | +|----------|--------|-----------------| +| `azure` | `app-service` (default) | `az webapp deploy` | +| `azure` | `aks` | `azure/k8s-deploy@v5` + `kubectl` | +| `azure` | `container-apps` | `az containerapp update` | +| `gcp` | `cloud-run` (default) | `gcloud run deploy` | +| `gcp` | `gke` | `gcloud container clusters get-credentials` + `kubectl` | +| `hetzner` | `vps` (default) | SSH + `docker compose pull && up -d` | +| `hetzner` | `hetzner-k8s` | kubeconfig + `kubectl apply` | +| `hetzner` | `coolify` | Coolify webhook deploy | + +**Registry defaults per platform:** + +| Platform | Default registry | +|----------|-----------------| +| `azure` | ACR (Azure Container Registry) | +| `gcp` | GAR (Google Artifact Registry) | +| `hetzner` | GHCR (GitHub Container Registry) | + +**CD steps generated (canonical order):** + +| Step | Condition | +|------|-----------| +| Trigger (push to default branch + `workflow_dispatch`) | Always | +| Checkout | Always | +| Platform authentication (OIDC/SSH) | Always | +| Container registry login | Always | +| Docker build + push | Only when `has_dockerfile = true` | +| Terraform init/plan/apply | Only when Terraform directory is detected | +| Database migration | Only when migration tool is detected (Prisma, 
Flyway, Alembic, etc.) | +| Deploy (platform-specific) | Always | +| Health check (post-deploy) | Always | +| Slack notification | Wired up; requires `SLACK_WEBHOOK_URL` secret | +| Rollback strategy comment | Always (documents `kubectl rollout undo` / `az webapp deployment slot swap` etc.) | + +**Multi-environment structure:** Every generated pipeline includes a `staging` and `production` job. Production requires a GitHub environment approval gate (`environment: production`). + +**Output files:** + +``` +.github/workflows/deploy-.yml CD workflow +.syncable/cd-manifest.toml Machine-readable context + unresolved tokens +.syncable/SECRETS_REQUIRED.md Per-platform secret setup instructions +``` + +**Project-level config (`.syncable.cd.toml`):** + +Override any detected value without CLI flags by placing a config file at the project root: + +```toml +platform = "azure" +target = "aks" +registry = "acr" +image_name = "my-api" +health_check_path = "/api/health" +default_branch = "main" +environments = ["staging", "production"] +``` + +Priority order: auto-detected value → config file → CLI flag. + +**Examples:** + +```bash +# Preview CD pipeline for Azure App Service +sync-ctl generate cd . --platform azure --dry-run + +# Write GCP Cloud Run pipeline +sync-ctl generate cd . --platform gcp --target cloud-run + +# Hetzner VPS deploy with custom image name +sync-ctl generate cd . --platform hetzner --target vps --image-name my-api + +# AKS deploy, write to a specific directory +sync-ctl generate cd . --platform azure --target aks -o ./pipelines --force +``` + +**Status:** āœ… Implemented (EPIC 2 complete) + +--- + +### 2d. `sync-ctl generate ci-cd ` + +Generate both CI and CD pipelines in one shot. Runs both generators from a single project analysis, cross-links the `IMAGE_TAG` environment variable between CI and CD outputs, and produces a single merged `SECRETS_REQUIRED.md` covering all secrets for both pipelines. 
+ +**Arguments:** +- `` — Path to the project directory to analyse (default: `.`) + +**Options:** + +| Flag | Short | Description | +|------|-------|-------------| +| `--platform ` | | Target cloud platform (required) | +| `--ci-format ` | | Override CI format (defaults to platform convention) | +| `--target ` | | CD deploy target (same options as `generate cd`) | +| `--registry ` | | Container registry override | +| `--image-name ` | | Docker image name | +| `--dry-run` | | Print both pipelines to stdout; do not write files | +| `--output ` | `-o` | Write all files to this directory | +| `--force` | | Overwrite existing files | +| `--notify` | | Append a Slack failure-notification step in the CI pipeline | + +**Output files:** + +``` +.github/workflows/ci.yml CI workflow (GitHub Actions) +.github/workflows/deploy-.yml CD workflow +azure-pipelines.yml CI workflow (Azure Pipelines, if platform=azure) +cloudbuild.yaml CI workflow (Cloud Build, if platform=gcp) +.syncable/cd-manifest.toml CD context + unresolved tokens +.syncable/SECRETS_REQUIRED.md Merged CI + CD secrets documentation +``` + +**Examples:** + +```bash +# Preview both pipelines for Azure AKS +sync-ctl generate ci-cd . --platform azure --target aks --dry-run + +# Write GCP Cloud Run CI+CD, with Slack notifications on CI failure +sync-ctl generate ci-cd . --platform gcp --target cloud-run --notify + +# Hetzner VPS, write to a temp directory +sync-ctl generate ci-cd . --platform hetzner --target vps -o /tmp/pipelines --force + +# Azure, override CI format to GitHub Actions instead of Azure Pipelines +sync-ctl generate ci-cd . --platform azure --ci-format github-actions --dry-run +``` + +**Status:** āœ… Implemented (CD-23) + +--- + ### 3. `sync-ctl validate ` Validate existing IaC files against best practices. 
diff --git a/src/.DS_Store b/src/.DS_Store deleted file mode 100644 index 37e31cbe..00000000 Binary files a/src/.DS_Store and /dev/null differ diff --git a/src/cli.rs b/src/cli.rs index 609c9b9b..bca9dde2 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -70,39 +70,10 @@ pub enum Commands { agent: bool, }, - /// Generate IaC files for a project + /// Generate files for a project (IaC, CI pipelines, and more) Generate { - /// Path to the project directory to analyze - #[arg(value_name = "PROJECT_PATH")] - path: PathBuf, - - /// Output directory for generated files - #[arg(short, long, value_name = "OUTPUT_DIR")] - output: Option, - - /// Generate Dockerfile - #[arg(long)] - dockerfile: bool, - - /// Generate Docker Compose file - #[arg(long)] - compose: bool, - - /// Generate Terraform configuration - #[arg(long)] - terraform: bool, - - /// Generate all supported IaC files - #[arg(long, conflicts_with_all = ["dockerfile", "compose", "terraform"])] - all: bool, - - /// Perform a dry run without creating files - #[arg(long)] - dry_run: bool, - - /// Overwrite existing files - #[arg(long)] - force: bool, + #[command(subcommand)] + command: GenerateCommand, }, /// Validate existing IaC files against best practices @@ -776,6 +747,223 @@ pub enum ChatProvider { Auto, } +/// Generate subcommands +#[derive(Subcommand)] +pub enum GenerateCommand { + /// Generate IaC files (Dockerfile, Docker Compose, Terraform) + Iac { + /// Path to the project directory to analyze + #[arg(value_name = "PROJECT_PATH")] + path: PathBuf, + + /// Output directory for generated files + #[arg(short, long, value_name = "OUTPUT_DIR")] + output: Option, + + /// Generate Dockerfile + #[arg(long)] + dockerfile: bool, + + /// Generate Docker Compose file + #[arg(long)] + compose: bool, + + /// Generate Terraform configuration + #[arg(long)] + terraform: bool, + + /// Generate all supported IaC files + #[arg(long, conflicts_with_all = ["dockerfile", "compose", "terraform"])] + all: bool, + + /// Perform a dry 
run without creating files + #[arg(long)] + dry_run: bool, + + /// Overwrite existing files + #[arg(long)] + force: bool, + }, + + /// Generate a CI pipeline skeleton for your project + Ci { + /// Path to the project directory + #[arg(value_name = "PROJECT_PATH", default_value = ".")] + path: PathBuf, + + /// Cloud platform target for the pipeline + #[arg(long, value_enum)] + platform: CiPlatform, + + /// Pipeline file format (defaults to canonical format for the chosen platform) + #[arg(long, value_enum)] + format: Option, + + /// Print the generated pipeline to stdout instead of writing files + #[arg(long)] + dry_run: bool, + + /// Output directory for generated pipeline files + #[arg(short, long, value_name = "OUTPUT_DIR")] + output: Option, + + /// Prefix applied to all environment variable and secret names + #[arg(long, value_name = "PREFIX")] + env_prefix: Option, + + /// Omit Docker build steps even when a Dockerfile is detected + #[arg(long)] + skip_docker: bool, + + /// Emit a Slack failure-notification step in the generated pipeline + #[arg(long)] + notify: bool, + }, + + /// Generate a CD (deployment) pipeline skeleton for your project + Cd { + /// Path to the project directory + #[arg(value_name = "PROJECT_PATH", default_value = ".")] + path: PathBuf, + + /// Cloud platform target for deployment + #[arg(long, value_enum)] + platform: CdPlatform, + + /// Specific deploy target within the platform + #[arg(long, value_enum)] + target: Option, + + /// Container registry to use (defaults per platform) + #[arg(long, value_enum)] + registry: Option, + + /// Docker image name (defaults to project name) + #[arg(long, value_name = "IMAGE_NAME")] + image_name: Option, + + /// Print the generated pipeline to stdout instead of writing files + #[arg(long)] + dry_run: bool, + + /// Output directory for generated pipeline files + #[arg(short, long, value_name = "OUTPUT_DIR")] + output: Option, + + /// Overwrite existing files + #[arg(long)] + force: bool, + }, + + /// 
Generate both CI and CD pipelines in one shot + CiCd { + /// Path to the project directory + #[arg(value_name = "PROJECT_PATH", default_value = ".")] + path: PathBuf, + + /// Cloud platform target + #[arg(long, value_enum)] + platform: CdPlatform, + + /// CI pipeline file format (defaults to GitHub Actions) + #[arg(long, value_enum)] + ci_format: Option, + + /// Specific deploy target within the platform + #[arg(long, value_enum)] + target: Option, + + /// Container registry to use (defaults per platform) + #[arg(long, value_enum)] + registry: Option, + + /// Docker image name (defaults to project name) + #[arg(long, value_name = "IMAGE_NAME")] + image_name: Option, + + /// Print the generated pipelines to stdout instead of writing files + #[arg(long)] + dry_run: bool, + + /// Output directory for generated pipeline files + #[arg(short, long, value_name = "OUTPUT_DIR")] + output: Option, + + /// Overwrite existing files + #[arg(long)] + force: bool, + + /// Emit a Slack failure-notification step in the CI pipeline + #[arg(long)] + notify: bool, + }, +} + +/// Cloud platform target for CI pipeline generation +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum, serde::Serialize)] +pub enum CiPlatform { + /// Microsoft Azure (Azure Pipelines) + Azure, + /// Google Cloud Platform (Cloud Build) + Gcp, + /// Hetzner (GitHub Actions on Hetzner-hosted runners) + Hetzner, +} + +/// CI pipeline file format +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum, serde::Serialize)] +pub enum CiFormat { + /// GitHub Actions workflow (.github/workflows/ci.yml) + GithubActions, + /// Azure Pipelines (azure-pipelines.yml) + AzurePipelines, + /// Google Cloud Build (cloudbuild.yaml) + CloudBuild, +} + +/// Cloud platform for CD pipeline generation +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum, serde::Serialize)] +pub enum CdPlatform { + /// Microsoft Azure (App Service, AKS, Container Apps) + Azure, + /// Google Cloud Platform (Cloud Run, GKE) + Gcp, + /// Hetzner (VPS, 
Kubernetes, Coolify) + Hetzner, +} + +/// Specific deploy target within a cloud platform +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum, serde::Serialize)] +pub enum CdTarget { + /// Azure App Service + AppService, + /// Azure Kubernetes Service + Aks, + /// Azure Container Apps + ContainerApps, + /// Google Cloud Run + CloudRun, + /// Google Kubernetes Engine + Gke, + /// Hetzner VPS (direct SSH deploy) + Vps, + /// Hetzner Kubernetes (k3s / managed) + HetznerK8s, + /// Coolify PaaS on Hetzner + Coolify, +} + +/// Container registry for CD pipeline generation +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum, serde::Serialize)] +pub enum CdRegistry { + /// Azure Container Registry + Acr, + /// Google Artifact Registry + Gar, + /// GitHub Container Registry + Ghcr, +} + impl Cli { /// Initialize logging based on verbosity level pub fn init_logging(&self) { diff --git a/src/config/types.rs b/src/config/types.rs index dbcc8366..594402b6 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -30,6 +30,10 @@ pub struct GenerationConfig { pub dockerfile: DockerfileConfig, pub compose: ComposeConfig, pub terraform: TerraformConfig, + /// Emit a Slack failure-notify step in generated CI pipelines. + /// Equivalent to passing `--notify` on the CLI. + #[serde(default)] + pub notify: bool, } /// Dockerfile generation configuration @@ -247,6 +251,7 @@ impl Default for Config { include_networking: true, include_monitoring: false, }, + notify: false, }, output: OutputConfig { format: OutputFormat::Files, diff --git a/src/generator/cd_generation/auth_azure.rs b/src/generator/cd_generation/auth_azure.rs new file mode 100644 index 00000000..1dc288b4 --- /dev/null +++ b/src/generator/cd_generation/auth_azure.rs @@ -0,0 +1,264 @@ +//! CD-04 — Azure OIDC Authentication Step +//! +//! Generates the GitHub Actions YAML snippet for Azure login using +//! OpenID Connect (OIDC) / Workload Identity Federation. This is the +//! 
recommended zero-secret-rotation approach: +//! +//! ```yaml +//! - name: Azure login (OIDC) +//! uses: azure/login@v2 +//! with: +//! client-id: ${{ secrets.AZURE_CLIENT_ID }} +//! tenant-id: ${{ secrets.AZURE_TENANT_ID }} +//! subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} +//! ``` +//! +//! The workflow must have `permissions: { id-token: write }` for OIDC to work. + +use super::schema::AuthStep; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Resolved Azure auth configuration. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AzureAuthConfig { + /// GitHub Actions action reference. + pub action: String, + /// Auth method label. + pub method: String, + /// Secrets the user must configure. + pub required_secrets: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Builds the Azure OIDC auth configuration. +pub fn generate_azure_auth() -> AzureAuthConfig { + AzureAuthConfig { + action: "azure/login@v2".to_string(), + method: "oidc".to_string(), + required_secrets: vec![ + "AZURE_CLIENT_ID".to_string(), + "AZURE_TENANT_ID".to_string(), + "AZURE_SUBSCRIPTION_ID".to_string(), + ], + } +} + +/// Converts an `AzureAuthConfig` into the schema `AuthStep` for pipeline assembly. +pub fn to_auth_step(config: &AzureAuthConfig) -> AuthStep { + AuthStep { + action: Some(config.action.clone()), + method: config.method.clone(), + required_secrets: config.required_secrets.clone(), + } +} + +/// Renders the Azure OIDC login step as a GitHub Actions YAML snippet. +/// +/// The output includes the `permissions` block comment as a reminder and +/// the login step itself with all three OIDC secrets. 
+pub fn render_azure_auth_yaml(config: &AzureAuthConfig) -> String { + format!( + "\ + - name: Azure login (OIDC) + uses: {action} + with: + client-id: ${{{{ secrets.AZURE_CLIENT_ID }}}} + tenant-id: ${{{{ secrets.AZURE_TENANT_ID }}}} + subscription-id: ${{{{ secrets.AZURE_SUBSCRIPTION_ID }}}}\n", + action = config.action, + ) +} + +/// Returns the `permissions` block needed at the job level for OIDC. +pub fn azure_oidc_permissions_yaml() -> &'static str { + "\ + permissions: + id-token: write + contents: read\n" +} + +/// Renders secrets documentation entries for Azure OIDC. +pub fn azure_auth_secrets_doc() -> String { + "\ +### `AZURE_CLIENT_ID` *(required)* + +Application (client) ID of the Azure AD App Registration used for OIDC federation. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `az ad app show --id --query appId -o tsv` + +--- + +### `AZURE_TENANT_ID` *(required)* + +Azure Active Directory tenant ID. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `az account show --query tenantId -o tsv` + +--- + +### `AZURE_SUBSCRIPTION_ID` *(required)* + +Azure subscription ID for the target deployment. 
+ +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `az account show --query id -o tsv`\n" + .to_string() +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_azure_auth ─────────────────────────────────────────── + + #[test] + fn config_uses_azure_login_v2() { + let config = generate_azure_auth(); + assert_eq!(config.action, "azure/login@v2"); + } + + #[test] + fn config_method_is_oidc() { + let config = generate_azure_auth(); + assert_eq!(config.method, "oidc"); + } + + #[test] + fn config_requires_three_secrets() { + let config = generate_azure_auth(); + assert_eq!(config.required_secrets.len(), 3); + } + + #[test] + fn config_requires_client_id() { + let config = generate_azure_auth(); + assert!(config.required_secrets.contains(&"AZURE_CLIENT_ID".to_string())); + } + + #[test] + fn config_requires_tenant_id() { + let config = generate_azure_auth(); + assert!(config.required_secrets.contains(&"AZURE_TENANT_ID".to_string())); + } + + #[test] + fn config_requires_subscription_id() { + let config = generate_azure_auth(); + assert!(config.required_secrets.contains(&"AZURE_SUBSCRIPTION_ID".to_string())); + } + + // ── to_auth_step ────────────────────────────────────────────────── + + #[test] + fn to_auth_step_preserves_action() { + let config = generate_azure_auth(); + let step = to_auth_step(&config); + assert_eq!(step.action, Some("azure/login@v2".to_string())); + } + + #[test] + fn to_auth_step_preserves_method() { + let config = generate_azure_auth(); + let step = to_auth_step(&config); + assert_eq!(step.method, "oidc"); + } + + #[test] + fn to_auth_step_preserves_secrets() { + let config = generate_azure_auth(); + let step = to_auth_step(&config); + assert_eq!(step.required_secrets.len(), 3); + } + + // ── render_azure_auth_yaml ──────────────────────────────────────── + + #[test] + fn yaml_contains_action_reference() 
{ + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + assert!(yaml.contains("azure/login@v2")); + } + + #[test] + fn yaml_references_client_id_secret() { + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + assert!(yaml.contains("secrets.AZURE_CLIENT_ID")); + } + + #[test] + fn yaml_references_tenant_id_secret() { + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + assert!(yaml.contains("secrets.AZURE_TENANT_ID")); + } + + #[test] + fn yaml_references_subscription_id_secret() { + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + assert!(yaml.contains("secrets.AZURE_SUBSCRIPTION_ID")); + } + + #[test] + fn yaml_contains_step_name() { + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + assert!(yaml.contains("- name: Azure login")); + } + + #[test] + fn yaml_no_hardcoded_secret_values() { + let config = generate_azure_auth(); + let yaml = render_azure_auth_yaml(&config); + // Should reference secrets, never embed UUIDs or real values + assert!(!yaml.contains("00000000-")); + assert!(yaml.contains("${{")); + } + + // ── azure_oidc_permissions_yaml ─────────────────────────────────── + + #[test] + fn permissions_contains_id_token_write() { + let perms = azure_oidc_permissions_yaml(); + assert!(perms.contains("id-token: write")); + } + + #[test] + fn permissions_contains_contents_read() { + let perms = azure_oidc_permissions_yaml(); + assert!(perms.contains("contents: read")); + } + + // ── azure_auth_secrets_doc ──────────────────────────────────────── + + #[test] + fn secrets_doc_mentions_all_three_secrets() { + let doc = azure_auth_secrets_doc(); + assert!(doc.contains("AZURE_CLIENT_ID")); + assert!(doc.contains("AZURE_TENANT_ID")); + assert!(doc.contains("AZURE_SUBSCRIPTION_ID")); + } + + #[test] + fn secrets_doc_includes_az_cli_commands() { + let doc = azure_auth_secrets_doc(); + 
assert!(doc.contains("az ad app show")); + assert!(doc.contains("az account show")); + } + + #[test] + fn secrets_doc_marks_all_as_required() { + let doc = azure_auth_secrets_doc(); + assert_eq!(doc.matches("*(required)*").count(), 3); + } +} diff --git a/src/generator/cd_generation/auth_gcp.rs b/src/generator/cd_generation/auth_gcp.rs new file mode 100644 index 00000000..d0606e8d --- /dev/null +++ b/src/generator/cd_generation/auth_gcp.rs @@ -0,0 +1,314 @@ +//! CD-05 — GCP Workload Identity Federation Authentication Step +//! +//! Generates the GitHub Actions YAML snippet for GCP authentication using +//! Workload Identity Federation (WIF). This is the recommended keyless +//! approach — no service account JSON keys needed: +//! +//! ```yaml +//! - name: Authenticate to Google Cloud +//! uses: google-github-actions/auth@v2 +//! with: +//! workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} +//! service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} +//! +//! - name: Set up Cloud SDK +//! uses: google-github-actions/setup-gcloud@v2 +//! ``` +//! +//! The workflow must have `permissions: { id-token: write }` for WIF to work. + +use super::schema::AuthStep; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Resolved GCP auth configuration. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct GcpAuthConfig { + /// GitHub Actions action reference for auth. + pub auth_action: String, + /// GitHub Actions action reference for gcloud SDK setup. + pub setup_gcloud_action: String, + /// Auth method label. + pub method: String, + /// Secrets the user must configure. + pub required_secrets: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Builds the GCP Workload Identity Federation auth configuration. 
+pub fn generate_gcp_auth() -> GcpAuthConfig { + GcpAuthConfig { + auth_action: "google-github-actions/auth@v2".to_string(), + setup_gcloud_action: "google-github-actions/setup-gcloud@v2".to_string(), + method: "workload-identity".to_string(), + required_secrets: vec![ + "GCP_WORKLOAD_IDENTITY_PROVIDER".to_string(), + "GCP_SERVICE_ACCOUNT".to_string(), + ], + } +} + +/// Converts a `GcpAuthConfig` into the schema `AuthStep` for pipeline assembly. +pub fn to_auth_step(config: &GcpAuthConfig) -> AuthStep { + AuthStep { + action: Some(config.auth_action.clone()), + method: config.method.clone(), + required_secrets: config.required_secrets.clone(), + } +} + +/// Renders the GCP WIF auth steps as a GitHub Actions YAML snippet. +/// +/// Emits two steps: +/// 1. `google-github-actions/auth@v2` — authenticates via WIF +/// 2. `google-github-actions/setup-gcloud@v2` — configures the `gcloud` CLI +pub fn render_gcp_auth_yaml(config: &GcpAuthConfig) -> String { + format!( + "\ + - name: Authenticate to Google Cloud + uses: {auth_action} + with: + workload_identity_provider: ${{{{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}}} + service_account: ${{{{ secrets.GCP_SERVICE_ACCOUNT }}}} + + - name: Set up Cloud SDK + uses: {setup_action}\n", + auth_action = config.auth_action, + setup_action = config.setup_gcloud_action, + ) +} + +/// Renders the GAR Docker auth configuration step. +/// +/// After WIF auth, this step configures Docker to authenticate against +/// Google Artifact Registry using `gcloud auth configure-docker`. +pub fn render_gar_docker_auth_yaml(gar_location: &str) -> String { + format!( + "\ + - name: Configure Docker for Artifact Registry + run: gcloud auth configure-docker {gar_location}-docker.pkg.dev --quiet\n" + ) +} + +/// Returns the `permissions` block needed at the job level for WIF. +pub fn gcp_wif_permissions_yaml() -> &'static str { + "\ + permissions: + id-token: write + contents: read\n" +} + +/// Renders secrets documentation entries for GCP WIF. 
+pub fn gcp_auth_secrets_doc() -> String { + "\ +### `GCP_WORKLOAD_IDENTITY_PROVIDER` *(required)* + +Full resource name of the Workload Identity Federation provider. + +Format: `projects//locations/global/workloadIdentityPools//providers/` + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** +```bash +gcloud iam workload-identity-pools providers describe \\ + --project= \\ + --location=global \\ + --workload-identity-pool= \\ + --format='value(name)' +``` + +--- + +### `GCP_SERVICE_ACCOUNT` *(required)* + +Email address of the Google Cloud service account to impersonate. + +Format: `@.iam.gserviceaccount.com` + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `gcloud iam service-accounts list --project=`\n" + .to_string() +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_gcp_auth ───────────────────────────────────────────── + + #[test] + fn config_uses_google_auth_v2() { + let config = generate_gcp_auth(); + assert_eq!(config.auth_action, "google-github-actions/auth@v2"); + } + + #[test] + fn config_uses_setup_gcloud_v2() { + let config = generate_gcp_auth(); + assert_eq!(config.setup_gcloud_action, "google-github-actions/setup-gcloud@v2"); + } + + #[test] + fn config_method_is_workload_identity() { + let config = generate_gcp_auth(); + assert_eq!(config.method, "workload-identity"); + } + + #[test] + fn config_requires_two_secrets() { + let config = generate_gcp_auth(); + assert_eq!(config.required_secrets.len(), 2); + } + + #[test] + fn config_requires_wif_provider() { + let config = generate_gcp_auth(); + assert!(config + .required_secrets + .contains(&"GCP_WORKLOAD_IDENTITY_PROVIDER".to_string())); + } + + #[test] + fn config_requires_service_account() { + let config = generate_gcp_auth(); + assert!(config + .required_secrets + 
.contains(&"GCP_SERVICE_ACCOUNT".to_string())); + } + + // ── to_auth_step ────────────────────────────────────────────────── + + #[test] + fn to_auth_step_preserves_action() { + let config = generate_gcp_auth(); + let step = to_auth_step(&config); + assert_eq!(step.action, Some("google-github-actions/auth@v2".to_string())); + } + + #[test] + fn to_auth_step_preserves_method() { + let config = generate_gcp_auth(); + let step = to_auth_step(&config); + assert_eq!(step.method, "workload-identity"); + } + + #[test] + fn to_auth_step_preserves_secrets() { + let config = generate_gcp_auth(); + let step = to_auth_step(&config); + assert_eq!(step.required_secrets.len(), 2); + } + + // ── render_gcp_auth_yaml ────────────────────────────────────────── + + #[test] + fn yaml_contains_auth_action() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(yaml.contains("google-github-actions/auth@v2")); + } + + #[test] + fn yaml_contains_setup_gcloud_action() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(yaml.contains("google-github-actions/setup-gcloud@v2")); + } + + #[test] + fn yaml_references_wif_provider_secret() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(yaml.contains("secrets.GCP_WORKLOAD_IDENTITY_PROVIDER")); + } + + #[test] + fn yaml_references_service_account_secret() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(yaml.contains("secrets.GCP_SERVICE_ACCOUNT")); + } + + #[test] + fn yaml_contains_two_step_names() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(yaml.contains("Authenticate to Google Cloud")); + assert!(yaml.contains("Set up Cloud SDK")); + } + + #[test] + fn yaml_no_hardcoded_json_keys() { + let config = generate_gcp_auth(); + let yaml = render_gcp_auth_yaml(&config); + assert!(!yaml.contains("\"type\": \"service_account\"")); + 
assert!(yaml.contains("${{")); + } + + // ── render_gar_docker_auth_yaml ─────────────────────────────────── + + #[test] + fn gar_docker_auth_contains_configure_docker() { + let yaml = render_gar_docker_auth_yaml("us-central1"); + assert!(yaml.contains("gcloud auth configure-docker")); + } + + #[test] + fn gar_docker_auth_includes_location() { + let yaml = render_gar_docker_auth_yaml("europe-west1"); + assert!(yaml.contains("europe-west1-docker.pkg.dev")); + } + + #[test] + fn gar_docker_auth_uses_quiet_flag() { + let yaml = render_gar_docker_auth_yaml("us-central1"); + assert!(yaml.contains("--quiet")); + } + + // ── gcp_wif_permissions_yaml ────────────────────────────────────── + + #[test] + fn permissions_contains_id_token_write() { + let perms = gcp_wif_permissions_yaml(); + assert!(perms.contains("id-token: write")); + } + + #[test] + fn permissions_contains_contents_read() { + let perms = gcp_wif_permissions_yaml(); + assert!(perms.contains("contents: read")); + } + + // ── gcp_auth_secrets_doc ────────────────────────────────────────── + + #[test] + fn secrets_doc_mentions_both_secrets() { + let doc = gcp_auth_secrets_doc(); + assert!(doc.contains("GCP_WORKLOAD_IDENTITY_PROVIDER")); + assert!(doc.contains("GCP_SERVICE_ACCOUNT")); + } + + #[test] + fn secrets_doc_includes_gcloud_commands() { + let doc = gcp_auth_secrets_doc(); + assert!(doc.contains("gcloud iam")); + } + + #[test] + fn secrets_doc_marks_all_as_required() { + let doc = gcp_auth_secrets_doc(); + assert_eq!(doc.matches("*(required)*").count(), 2); + } + + #[test] + fn secrets_doc_includes_format_example() { + let doc = gcp_auth_secrets_doc(); + assert!(doc.contains("projects/")); + assert!(doc.contains("iam.gserviceaccount.com")); + } +} diff --git a/src/generator/cd_generation/auth_hetzner.rs b/src/generator/cd_generation/auth_hetzner.rs new file mode 100644 index 00000000..77eb9904 --- /dev/null +++ b/src/generator/cd_generation/auth_hetzner.rs @@ -0,0 +1,381 @@ +//! 
CD-06 — Hetzner SSH Authentication Step +//! +//! Generates the GitHub Actions YAML snippets for Hetzner deployments. +//! Hetzner has no managed OIDC integration, so we use: +//! +//! - **VPS / Docker Compose targets:** SSH key via `webfactory/ssh-agent@v0.9.0` +//! - **K8s targets:** `kubectl` kubeconfig written from a secret +//! +//! ## VPS pattern +//! +//! ```yaml +//! - name: Set up SSH agent +//! uses: webfactory/ssh-agent@v0.9.0 +//! with: +//! ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }} +//! +//! - name: Add host to known_hosts +//! run: ssh-keyscan -H ${{ secrets.SSH_HOST }} >> ~/.ssh/known_hosts +//! ``` +//! +//! ## K8s pattern +//! +//! ```yaml +//! - name: Set up kubeconfig +//! run: | +//! mkdir -p ~/.kube +//! echo "${{ secrets.KUBECONFIG }}" > ~/.kube/config +//! chmod 600 ~/.kube/config +//! ``` + +use super::context::DeployTarget; +use super::schema::AuthStep; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Resolved Hetzner auth configuration. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HetznerAuthConfig { + /// Auth method label: `"ssh"` or `"kubeconfig"`. + pub method: String, + /// Secrets the user must configure. + pub required_secrets: Vec, + /// Deploy target determines which auth pattern to use. + pub target: DeployTarget, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Builds the Hetzner auth configuration for the given deploy target. 
+/// +/// - VPS / Coolify → SSH-based auth +/// - HetznerK8s → Kubeconfig-based auth +pub fn generate_hetzner_auth(target: &DeployTarget) -> HetznerAuthConfig { + match target { + DeployTarget::HetznerK8s => HetznerAuthConfig { + method: "kubeconfig".to_string(), + required_secrets: vec!["KUBECONFIG".to_string()], + target: target.clone(), + }, + // VPS, Coolify, and any other Hetzner target use SSH + _ => HetznerAuthConfig { + method: "ssh".to_string(), + required_secrets: vec![ + "SSH_PRIVATE_KEY".to_string(), + "SSH_HOST".to_string(), + "SSH_USER".to_string(), + ], + target: target.clone(), + }, + } +} + +/// Converts a `HetznerAuthConfig` into the schema `AuthStep` for pipeline assembly. +pub fn to_auth_step(config: &HetznerAuthConfig) -> AuthStep { + AuthStep { + action: match config.method.as_str() { + "ssh" => Some("webfactory/ssh-agent@v0.9.0".to_string()), + _ => None, + }, + method: config.method.clone(), + required_secrets: config.required_secrets.clone(), + } +} + +/// Renders the Hetzner auth steps as a GitHub Actions YAML snippet. +/// +/// For SSH targets, emits the ssh-agent setup + known_hosts step. +/// For K8s targets, emits the kubeconfig write step. +pub fn render_hetzner_auth_yaml(config: &HetznerAuthConfig) -> String { + match config.method.as_str() { + "kubeconfig" => render_kubeconfig_auth(), + _ => render_ssh_auth(), + } +} + +/// Renders the SSH-based auth snippet (VPS / Coolify). +fn render_ssh_auth() -> String { + format!( + "\ + - name: Set up SSH agent + uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{{{ secrets.SSH_PRIVATE_KEY }}}} + + - name: Add host to known_hosts + run: ssh-keyscan -H ${{{{ secrets.SSH_HOST }}}} >> ~/.ssh/known_hosts\n" + ) +} + +/// Renders the kubeconfig-based auth snippet (HetznerK8s). 
+fn render_kubeconfig_auth() -> String { + format!( + "\ + - name: Set up kubeconfig + run: | + mkdir -p ~/.kube + echo \"${{{{ secrets.KUBECONFIG }}}}\" > ~/.kube/config + chmod 600 ~/.kube/config\n" + ) +} + +/// Renders secrets documentation entries for Hetzner auth. +pub fn hetzner_auth_secrets_doc(config: &HetznerAuthConfig) -> String { + match config.method.as_str() { + "kubeconfig" => "\ +### `KUBECONFIG` *(required)* + +Base64-encoded or raw kubeconfig for the Hetzner Kubernetes cluster. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** +```bash +hcloud kubernetes cluster kubeconfig --name +# or +kubectl config view --raw --minify +``` + +**Important:** Ensure the kubeconfig uses a service account token, not a user certificate that expires.\n" + .to_string(), + _ => "\ +### `SSH_PRIVATE_KEY` *(required)* + +Ed25519 or RSA private key for SSH access to the Hetzner VPS. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** +```bash +ssh-keygen -t ed25519 -C \"github-actions-deploy\" -f deploy_key -N \"\" +# Add deploy_key.pub to the server's ~/.ssh/authorized_keys +# Paste the contents of deploy_key into the secret +``` + +--- + +### `SSH_HOST` *(required)* + +IP address or hostname of the Hetzner VPS. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +--- + +### `SSH_USER` *(required)* + +Username for SSH login, e.g. `deploy` or `root`. 
+ +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**Best practice:** Create a dedicated `deploy` user with limited sudo privileges rather than using `root`.\n" + .to_string(), + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use super::super::context::DeployTarget; + + // ── generate_hetzner_auth — VPS ─────────────────────────────────── + + #[test] + fn vps_config_method_is_ssh() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + assert_eq!(config.method, "ssh"); + } + + #[test] + fn vps_config_requires_three_secrets() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + assert_eq!(config.required_secrets.len(), 3); + } + + #[test] + fn vps_config_requires_ssh_private_key() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + assert!(config.required_secrets.contains(&"SSH_PRIVATE_KEY".to_string())); + } + + #[test] + fn vps_config_requires_ssh_host() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + assert!(config.required_secrets.contains(&"SSH_HOST".to_string())); + } + + #[test] + fn vps_config_requires_ssh_user() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + assert!(config.required_secrets.contains(&"SSH_USER".to_string())); + } + + // ── generate_hetzner_auth — Coolify ─────────────────────────────── + + #[test] + fn coolify_also_uses_ssh() { + let config = generate_hetzner_auth(&DeployTarget::Coolify); + assert_eq!(config.method, "ssh"); + } + + // ── generate_hetzner_auth — K8s ─────────────────────────────────── + + #[test] + fn k8s_config_method_is_kubeconfig() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + assert_eq!(config.method, "kubeconfig"); + } + + #[test] + fn k8s_config_requires_one_secret() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + assert_eq!(config.required_secrets.len(), 1); + } + + #[test] + fn 
k8s_config_requires_kubeconfig() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + assert!(config.required_secrets.contains(&"KUBECONFIG".to_string())); + } + + // ── to_auth_step ────────────────────────────────────────────────── + + #[test] + fn ssh_auth_step_has_action() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let step = to_auth_step(&config); + assert_eq!(step.action, Some("webfactory/ssh-agent@v0.9.0".to_string())); + } + + #[test] + fn k8s_auth_step_has_no_action() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let step = to_auth_step(&config); + assert!(step.action.is_none()); + } + + #[test] + fn to_auth_step_preserves_method() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let step = to_auth_step(&config); + assert_eq!(step.method, "ssh"); + } + + #[test] + fn to_auth_step_preserves_secrets() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let step = to_auth_step(&config); + assert_eq!(step.required_secrets.len(), 3); + } + + // ── render_hetzner_auth_yaml — SSH ──────────────────────────────── + + #[test] + fn ssh_yaml_contains_ssh_agent_action() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("webfactory/ssh-agent@v0.9.0")); + } + + #[test] + fn ssh_yaml_references_private_key_secret() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("secrets.SSH_PRIVATE_KEY")); + } + + #[test] + fn ssh_yaml_contains_known_hosts_step() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("ssh-keyscan")); + assert!(yaml.contains("known_hosts")); + } + + #[test] + fn ssh_yaml_references_host_secret() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let yaml = render_hetzner_auth_yaml(&config); + 
assert!(yaml.contains("secrets.SSH_HOST")); + } + + #[test] + fn ssh_yaml_contains_two_steps() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let yaml = render_hetzner_auth_yaml(&config); + let step_count = yaml.matches("- name:").count(); + assert_eq!(step_count, 2); + } + + // ── render_hetzner_auth_yaml — Kubeconfig ───────────────────────── + + #[test] + fn k8s_yaml_creates_kube_directory() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("mkdir -p ~/.kube")); + } + + #[test] + fn k8s_yaml_writes_kubeconfig() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("secrets.KUBECONFIG")); + assert!(yaml.contains("~/.kube/config")); + } + + #[test] + fn k8s_yaml_sets_secure_permissions() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let yaml = render_hetzner_auth_yaml(&config); + assert!(yaml.contains("chmod 600")); + } + + #[test] + fn k8s_yaml_does_not_contain_ssh_agent() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let yaml = render_hetzner_auth_yaml(&config); + assert!(!yaml.contains("ssh-agent")); + } + + // ── hetzner_auth_secrets_doc ────────────────────────────────────── + + #[test] + fn ssh_secrets_doc_mentions_all_secrets() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let doc = hetzner_auth_secrets_doc(&config); + assert!(doc.contains("SSH_PRIVATE_KEY")); + assert!(doc.contains("SSH_HOST")); + assert!(doc.contains("SSH_USER")); + } + + #[test] + fn ssh_secrets_doc_includes_keygen_instructions() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let doc = hetzner_auth_secrets_doc(&config); + assert!(doc.contains("ssh-keygen")); + } + + #[test] + fn k8s_secrets_doc_mentions_kubeconfig() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let doc = 
hetzner_auth_secrets_doc(&config); + assert!(doc.contains("KUBECONFIG")); + } + + #[test] + fn k8s_secrets_doc_includes_hcloud_command() { + let config = generate_hetzner_auth(&DeployTarget::HetznerK8s); + let doc = hetzner_auth_secrets_doc(&config); + assert!(doc.contains("hcloud")); + } + + #[test] + fn ssh_secrets_doc_recommends_deploy_user() { + let config = generate_hetzner_auth(&DeployTarget::Vps); + let doc = hetzner_auth_secrets_doc(&config); + assert!(doc.contains("deploy")); + } +} diff --git a/src/generator/cd_generation/cd_config.rs b/src/generator/cd_generation/cd_config.rs new file mode 100644 index 00000000..43d3de4b --- /dev/null +++ b/src/generator/cd_generation/cd_config.rs @@ -0,0 +1,501 @@ +//! CD-24 — `.syncable.cd.toml` Project-Level Config +//! +//! Parses the optional `[cd]` block from `.syncable.toml` (or a standalone +//! `.syncable.cd.toml`). Every field carries `#[serde(default)]` so partial +//! configs are always valid — only the keys present in the file are applied. +//! +//! Priority order (lowest → highest): +//! detected value < config file < CLI flags +//! +//! `merge_config_into_cd_context()` applies the config-file layer; CLI flags +//! are handled in `handle_generate_cd()` after this call. + +use std::path::Path; + +use serde::Deserialize; + +use super::context::{CdContext, CdPlatform, DeployTarget, Environment, Registry}; + +// ── Config struct ───────────────────────────────────────────────────────────── + +/// Represents the `[cd]` section of `.syncable.toml` / `.syncable.cd.toml`. +/// +/// All fields are `Option` so that absent keys are distinguishable from +/// explicit `""` values, and `Default` gives every field `None` which the +/// merge function treats as "not set — keep the detected value". +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(default)] +pub struct CdConfig { + /// Override the detected platform (`azure`, `gcp`, `hetzner`). + pub platform: Option, + /// Override the deploy target (e.g. 
`app-service`, `cloud-run`, `vps`). + pub target: Option, + /// Environments to generate (e.g. `["staging", "production"]`). + pub environments: Option>, + /// Override the container registry (`acr`, `gar`, `ghcr`). + pub registry: Option, + /// Override the Docker image name. + pub image_name: Option, + /// Override the health check path. + pub health_check_path: Option, + /// Override the migration command. + pub migration_command: Option, + /// Override the default branch. + pub default_branch: Option, +} + +/// Wraps `CdConfig` when parsing from a full `.syncable.toml` that uses a +/// `[cd]` table header. +#[derive(Debug, Deserialize, Default)] +#[serde(default)] +struct SyncableToml { + cd: CdConfig, +} + +// ── File discovery ──────────────────────────────────────────────────────────── + +/// Attempts to load CD config from the project root. +/// +/// Look-up order: +/// 1. `.syncable.cd.toml` — dedicated file, takes precedence +/// 2. `.syncable.toml` — shared config, reads the `[cd]` table +/// +/// Returns `None` when neither file exists. +pub fn load_cd_config(project_root: &Path) -> crate::Result> { + // 1. Dedicated file + let dedicated = project_root.join(".syncable.cd.toml"); + if dedicated.exists() { + let raw = std::fs::read_to_string(&dedicated)?; + let cfg: CdConfig = toml::from_str(&raw).map_err(|e| { + crate::error::IaCGeneratorError::Config(crate::error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + return Ok(Some(cfg)); + } + + // 2. 
Shared file with [cd] table + let shared = project_root.join(".syncable.toml"); + if shared.exists() { + let raw = std::fs::read_to_string(&shared)?; + let wrapper: SyncableToml = toml::from_str(&raw).map_err(|e| { + crate::error::IaCGeneratorError::Config(crate::error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + let cfg = wrapper.cd; + if cfg.platform.is_some() + || cfg.target.is_some() + || cfg.environments.is_some() + || cfg.registry.is_some() + || cfg.image_name.is_some() + || cfg.health_check_path.is_some() + || cfg.migration_command.is_some() + || cfg.default_branch.is_some() + { + return Ok(Some(cfg)); + } + } + + Ok(None) +} + +// ── Merge ───────────────────────────────────────────────────────────────────── + +/// Applies `config` onto `ctx`, overwriting only the fields the config file +/// explicitly set. CLI flags are applied *after* this call and will win over +/// both detected values and config-file values. +pub fn merge_config_into_cd_context(config: &CdConfig, ctx: &mut CdContext) { + if let Some(ref p) = config.platform + && let Some(platform) = parse_platform(p) + { + ctx.platform = platform; + } + + if let Some(ref t) = config.target + && let Some(target) = parse_deploy_target(t) + { + ctx.deploy_target = target; + } + + if let Some(ref envs) = config.environments { + ctx.environments = envs + .iter() + .map(|name| Environment { + name: name.clone(), + requires_approval: name == "production", + }) + .collect(); + } + + if let Some(ref r) = config.registry + && let Some(registry) = parse_registry(r) + { + ctx.registry = registry; + } + + if let Some(ref img) = config.image_name { + ctx.image_name = img.clone(); + } + + if let Some(ref path) = config.health_check_path { + ctx.health_check_path = Some(path.clone()); + } + + if let Some(ref cmd) = config.migration_command { + ctx.migration_command_override = Some(cmd.clone()); + } + + if let Some(ref branch) = config.default_branch { + ctx.default_branch = branch.clone(); + } +} + +// 
── Parsers ─────────────────────────────────────────────────────────────────── + +fn parse_platform(s: &str) -> Option { + match s.to_lowercase().as_str() { + "azure" => Some(CdPlatform::Azure), + "gcp" => Some(CdPlatform::Gcp), + "hetzner" => Some(CdPlatform::Hetzner), + _ => None, + } +} + +fn parse_deploy_target(s: &str) -> Option { + match s.to_lowercase().replace('_', "-").as_str() { + "app-service" | "appservice" => Some(DeployTarget::AppService), + "aks" => Some(DeployTarget::Aks), + "container-apps" | "containerapps" => Some(DeployTarget::ContainerApps), + "cloud-run" | "cloudrun" => Some(DeployTarget::CloudRun), + "gke" => Some(DeployTarget::Gke), + "vps" => Some(DeployTarget::Vps), + "hetzner-k8s" | "hetznerk8s" | "k8s" => Some(DeployTarget::HetznerK8s), + "coolify" => Some(DeployTarget::Coolify), + _ => None, + } +} + +fn parse_registry(s: &str) -> Option { + match s.to_lowercase().as_str() { + "acr" => Some(Registry::Acr), + "gar" => Some(Registry::Gar), + "ghcr" => Some(Registry::Ghcr), + _ => None, + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn parse_config(toml_str: &str) -> CdConfig { + toml::from_str(toml_str).unwrap() + } + + #[test] + fn parse_full_config() { + let cfg = parse_config( + r#" + platform = "azure" + target = "app-service" + environments = ["staging", "production"] + registry = "acr" + image_name = "my-app" + health_check_path = "/api/health" + migration_command = "npm run db:migrate" + default_branch = "main" + "#, + ); + assert_eq!(cfg.platform.as_deref(), Some("azure")); + assert_eq!(cfg.target.as_deref(), Some("app-service")); + assert_eq!(cfg.environments.as_ref().unwrap().len(), 2); + assert_eq!(cfg.registry.as_deref(), Some("acr")); + assert_eq!(cfg.image_name.as_deref(), Some("my-app")); + assert_eq!(cfg.health_check_path.as_deref(), Some("/api/health")); + assert_eq!(cfg.migration_command.as_deref(), 
Some("npm run db:migrate")); + assert_eq!(cfg.default_branch.as_deref(), Some("main")); + } + + #[test] + fn parse_partial_config() { + let cfg = parse_config( + r#" + platform = "gcp" + "#, + ); + assert_eq!(cfg.platform.as_deref(), Some("gcp")); + assert!(cfg.target.is_none()); + assert!(cfg.environments.is_none()); + } + + #[test] + fn parse_empty_config() { + let cfg = parse_config(""); + assert!(cfg.platform.is_none()); + assert!(cfg.target.is_none()); + } + + #[test] + fn load_config_from_dedicated_file() { + let dir = TempDir::new().unwrap(); + std::fs::write( + dir.path().join(".syncable.cd.toml"), + r#"platform = "hetzner""#, + ) + .unwrap(); + let cfg = load_cd_config(dir.path()).unwrap(); + assert!(cfg.is_some()); + assert_eq!(cfg.unwrap().platform.as_deref(), Some("hetzner")); + } + + #[test] + fn load_config_from_shared_file() { + let dir = TempDir::new().unwrap(); + std::fs::write( + dir.path().join(".syncable.toml"), + r#" + [cd] + platform = "azure" + target = "aks" + "#, + ) + .unwrap(); + let cfg = load_cd_config(dir.path()).unwrap(); + assert!(cfg.is_some()); + let c = cfg.unwrap(); + assert_eq!(c.platform.as_deref(), Some("azure")); + assert_eq!(c.target.as_deref(), Some("aks")); + } + + #[test] + fn load_config_dedicated_takes_precedence() { + let dir = TempDir::new().unwrap(); + std::fs::write( + dir.path().join(".syncable.cd.toml"), + r#"platform = "gcp""#, + ) + .unwrap(); + std::fs::write( + dir.path().join(".syncable.toml"), + r#" + [cd] + platform = "azure" + "#, + ) + .unwrap(); + let cfg = load_cd_config(dir.path()).unwrap(); + assert_eq!(cfg.unwrap().platform.as_deref(), Some("gcp")); + } + + #[test] + fn load_config_no_files() { + let dir = TempDir::new().unwrap(); + let cfg = load_cd_config(dir.path()).unwrap(); + assert!(cfg.is_none()); + } + + #[test] + fn load_config_shared_no_cd_section() { + let dir = TempDir::new().unwrap(); + std::fs::write( + dir.path().join(".syncable.toml"), + r#" + [ci] + platform = "azure" + "#, + ) + 
.unwrap(); + let cfg = load_cd_config(dir.path()).unwrap(); + assert!(cfg.is_none()); + } + + #[test] + fn parse_platform_variants() { + assert_eq!(parse_platform("azure"), Some(CdPlatform::Azure)); + assert_eq!(parse_platform("Azure"), Some(CdPlatform::Azure)); + assert_eq!(parse_platform("gcp"), Some(CdPlatform::Gcp)); + assert_eq!(parse_platform("hetzner"), Some(CdPlatform::Hetzner)); + assert_eq!(parse_platform("unknown"), None); + } + + #[test] + fn parse_deploy_target_variants() { + assert_eq!( + parse_deploy_target("app-service"), + Some(DeployTarget::AppService) + ); + assert_eq!( + parse_deploy_target("appservice"), + Some(DeployTarget::AppService) + ); + assert_eq!(parse_deploy_target("aks"), Some(DeployTarget::Aks)); + assert_eq!( + parse_deploy_target("container-apps"), + Some(DeployTarget::ContainerApps) + ); + assert_eq!( + parse_deploy_target("cloud-run"), + Some(DeployTarget::CloudRun) + ); + assert_eq!(parse_deploy_target("gke"), Some(DeployTarget::Gke)); + assert_eq!(parse_deploy_target("vps"), Some(DeployTarget::Vps)); + assert_eq!( + parse_deploy_target("hetzner-k8s"), + Some(DeployTarget::HetznerK8s) + ); + assert_eq!( + parse_deploy_target("coolify"), + Some(DeployTarget::Coolify) + ); + assert_eq!(parse_deploy_target("unknown"), None); + } + + #[test] + fn parse_registry_variants() { + assert_eq!(parse_registry("acr"), Some(Registry::Acr)); + assert_eq!(parse_registry("gar"), Some(Registry::Gar)); + assert_eq!(parse_registry("ghcr"), Some(Registry::Ghcr)); + assert_eq!(parse_registry("unknown"), None); + } + + #[test] + fn merge_platform() { + let cfg = parse_config(r#"platform = "gcp""#); + let dir = TempDir::new().unwrap(); + // Create a minimal CdContext via fixture + let mut ctx = make_test_context(dir.path()); + assert_eq!(ctx.platform, CdPlatform::Azure); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.platform, CdPlatform::Gcp); + } + + #[test] + fn merge_target() { + let cfg = parse_config(r#"target = "gke""#); + let 
dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.deploy_target, DeployTarget::Gke); + } + + #[test] + fn merge_environments() { + let cfg = parse_config(r#"environments = ["dev", "prod"]"#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.environments.len(), 2); + assert_eq!(ctx.environments[0].name, "dev"); + assert!(!ctx.environments[0].requires_approval); + // "production" triggers approval + assert!(!ctx.environments[1].requires_approval); // "prod" != "production" + } + + #[test] + fn merge_environments_production_approval() { + let cfg = parse_config(r#"environments = ["staging", "production"]"#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert!(!ctx.environments[0].requires_approval); + assert!(ctx.environments[1].requires_approval); + } + + #[test] + fn merge_image_name() { + let cfg = parse_config(r#"image_name = "custom-app""#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.image_name, "custom-app"); + } + + #[test] + fn merge_health_check_path() { + let cfg = parse_config(r#"health_check_path = "/healthz""#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.health_check_path.as_deref(), Some("/healthz")); + } + + #[test] + fn merge_default_branch() { + let cfg = parse_config(r#"default_branch = "master""#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.default_branch, "master"); + } + + #[test] + fn merge_migration_command() { + let cfg = parse_config(r#"migration_command 
= "bundle exec rails db:migrate""#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + assert!(ctx.migration_command_override.is_none()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!( + ctx.migration_command_override.as_deref(), + Some("bundle exec rails db:migrate") + ); + } + + #[test] + fn merge_migration_command_absent_leaves_none() { + let cfg = parse_config(r#"platform = "gcp""#); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + merge_config_into_cd_context(&cfg, &mut ctx); + assert!(ctx.migration_command_override.is_none()); + } + + #[test] + fn merge_empty_config_no_changes() { + let cfg = parse_config(""); + let dir = TempDir::new().unwrap(); + let mut ctx = make_test_context(dir.path()); + let original_platform = ctx.platform.clone(); + merge_config_into_cd_context(&cfg, &mut ctx); + assert_eq!(ctx.platform, original_platform); + } + + /// Creates a minimal CdContext for merge testing. + fn make_test_context(path: &Path) -> CdContext { + let analysis = crate::analyzer::analyze_project(path).unwrap(); + + CdContext { + analysis, + project_name: "test-project".to_string(), + platform: CdPlatform::Azure, + deploy_target: DeployTarget::AppService, + environments: vec![ + Environment { + name: "staging".to_string(), + requires_approval: false, + }, + Environment { + name: "production".to_string(), + requires_approval: true, + }, + ], + registry: Registry::Acr, + image_name: "test-project".to_string(), + has_terraform: false, + terraform_dir: None, + has_k8s_manifests: false, + k8s_manifest_dir: None, + has_helm_chart: false, + helm_chart_dir: None, + migration_tool: None, + migration_command_override: None, + health_check_path: None, + default_branch: "main".to_string(), + has_dockerfile: false, + } + } +} diff --git a/src/generator/cd_generation/cd_tests.rs b/src/generator/cd_generation/cd_tests.rs new file mode 100644 index 00000000..afb2bc35 --- /dev/null +++ 
b/src/generator/cd_generation/cd_tests.rs @@ -0,0 +1,464 @@ +//! CD-25 — Comprehensive Unit Tests for the CD Generator +//! +//! This module covers cross-cutting concerns that span multiple cd_generation +//! submodules: +//! - Full pipeline build → template render → YAML validation per platform +//! - Token cross-linking between CI and CD contexts +//! - Multi-environment structure validation +//! - Terraform wiring into the pipeline +//! - End-to-end dry-run simulation + +#[cfg(test)] +mod cd_snapshot_tests { + use crate::generator::cd_generation::{ + context::{CdPlatform, DeployTarget, Environment, MigrationTool, Registry}, + pipeline::build_cd_pipeline, + templates, + token_resolver::resolve_tokens, + }; + use tempfile::TempDir; + + // ── Fixture builder ─────────────────────────────────────────────────── + + fn make_context( + platform: CdPlatform, + target: DeployTarget, + ) -> crate::generator::cd_generation::context::CdContext { + let tmp = TempDir::new().unwrap(); + let analysis = crate::analyzer::analyze_project(tmp.path()).unwrap(); + crate::generator::cd_generation::context::CdContext { + analysis, + project_name: "snapshot-app".to_string(), + platform: platform.clone(), + deploy_target: target, + environments: vec![ + Environment { + name: "staging".to_string(), + requires_approval: false, + }, + Environment { + name: "production".to_string(), + requires_approval: true, + }, + ], + registry: match platform { + CdPlatform::Azure => Registry::Acr, + CdPlatform::Gcp => Registry::Gar, + CdPlatform::Hetzner => Registry::Ghcr, + }, + image_name: "snapshot-app".to_string(), + has_terraform: false, + terraform_dir: None, + has_k8s_manifests: false, + k8s_manifest_dir: None, + has_helm_chart: false, + helm_chart_dir: None, + migration_tool: None, + migration_command_override: None, + health_check_path: Some("/health".to_string()), + default_branch: "main".to_string(), + has_dockerfile: true, + } + } + + // ── Azure snapshots 
─────────────────────────────────────────────────── + + #[test] + fn azure_app_service_yaml_is_valid() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + let yaml = templates::azure::render(&pipeline); + assert!(yaml.contains("name:"), "Missing workflow name"); + assert!(yaml.contains("on:"), "Missing trigger section"); + assert!(yaml.contains("jobs:"), "Missing jobs section"); + assert!(yaml.contains("azure/login@v2"), "Missing Azure login action"); + assert!(yaml.contains("snapshot-app"), "Missing project name"); + } + + #[test] + fn azure_aks_yaml_contains_kubectl() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::Aks); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::azure::render(&pipeline); + assert!(yaml.contains("kubectl") || yaml.contains("aks"), "Missing K8s deploy"); + } + + #[test] + fn azure_container_apps_yaml_valid() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::ContainerApps); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::azure::render(&pipeline); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); + } + + // ── GCP snapshots ───────────────────────────────────────────────────── + + #[test] + fn gcp_cloud_run_yaml_is_valid() { + let ctx = make_context(CdPlatform::Gcp, DeployTarget::CloudRun); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + let yaml = templates::gcp::render(&pipeline); + assert!(yaml.contains("name:")); + assert!(yaml.contains("on:")); + assert!(yaml.contains("jobs:")); + assert!( + yaml.contains("google-github-actions/auth@v2"), + "Missing GCP auth action" + ); + } + + #[test] + fn gcp_gke_yaml_contains_k8s_deploy() { + let ctx = make_context(CdPlatform::Gcp, DeployTarget::Gke); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::gcp::render(&pipeline); + assert!(yaml.contains("kubectl") || 
yaml.contains("gke"), "Missing GKE deploy"); + } + + // ── Hetzner snapshots ───────────────────────────────────────────────── + + #[test] + fn hetzner_vps_yaml_is_valid() { + let ctx = make_context(CdPlatform::Hetzner, DeployTarget::Vps); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + let yaml = templates::hetzner::render(&pipeline); + assert!(yaml.contains("name:")); + assert!(yaml.contains("on:")); + assert!(yaml.contains("jobs:")); + assert!(yaml.contains("ssh") || yaml.contains("SSH"), "Missing SSH"); + } + + #[test] + fn hetzner_k8s_yaml_valid() { + let ctx = make_context(CdPlatform::Hetzner, DeployTarget::HetznerK8s); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::hetzner::render(&pipeline); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); + } + + #[test] + fn hetzner_coolify_yaml_valid() { + let ctx = make_context(CdPlatform::Hetzner, DeployTarget::Coolify); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::hetzner::render(&pipeline); + assert!(yaml.contains("name:")); + } + + // ── No hardcoded secrets ────────────────────────────────────────────── + + fn assert_no_hardcoded_secrets(yaml: &str) { + assert!(!yaml.contains("sk-"), "Contains hardcoded API key"); + assert!(!yaml.contains("ghp_"), "Contains hardcoded GitHub token"); + assert!(!yaml.contains("AKIA"), "Contains hardcoded AWS key"); + } + + #[test] + fn azure_yaml_no_hardcoded_secrets() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::azure::render(&pipeline); + assert_no_hardcoded_secrets(&yaml); + } + + #[test] + fn gcp_yaml_no_hardcoded_secrets() { + let ctx = make_context(CdPlatform::Gcp, DeployTarget::CloudRun); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::gcp::render(&pipeline); + assert_no_hardcoded_secrets(&yaml); + } + + #[test] + fn hetzner_yaml_no_hardcoded_secrets() { + let ctx = 
make_context(CdPlatform::Hetzner, DeployTarget::Vps); + let pipeline = build_cd_pipeline(&ctx); + let yaml = templates::hetzner::render(&pipeline); + assert_no_hardcoded_secrets(&yaml); + } + + // ── Pipeline structure tests ────────────────────────────────────────── + + #[test] + fn pipeline_has_two_environments() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.environments.len(), 2); + assert_eq!(pipeline.environments[0].name, "staging"); + assert_eq!(pipeline.environments[1].name, "production"); + } + + #[test] + fn production_requires_approval() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + let prod = pipeline.environments.iter().find(|e| e.name == "production").unwrap(); + assert!(prod.requires_approval); + } + + #[test] + fn staging_no_approval() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + let staging = pipeline.environments.iter().find(|e| e.name == "staging").unwrap(); + assert!(!staging.requires_approval); + } + + #[test] + fn health_check_has_endpoint() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + // health_check is always present (non-Option) + assert!(!pipeline.health_check.url.is_empty()); + } + + #[test] + fn migration_present_when_tool_detected() { + let mut ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + ctx.migration_tool = Some(MigrationTool::Prisma); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.is_some()); + assert!(pipeline.migration.as_ref().unwrap().command.contains("prisma")); + } + + #[test] + fn migration_absent_when_no_tool() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.is_none()); + } + + // ── 
Terraform wiring ────────────────────────────────────────────────── + + #[test] + fn terraform_step_present_when_has_terraform() { + let mut ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + ctx.has_terraform = true; + ctx.terraform_dir = Some(std::path::PathBuf::from("terraform")); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.terraform.is_some()); + assert_eq!(pipeline.terraform.as_ref().unwrap().working_directory, "terraform"); + } + + #[test] + fn terraform_step_absent_when_no_terraform() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.terraform.is_none()); + } + + // ── Notification wiring ─────────────────────────────────────────────── + + #[test] + fn notification_always_present() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.notifications.is_some()); + assert_eq!(pipeline.notifications.as_ref().unwrap().channel_type, "slack"); + } + + // ── Rollback info ───────────────────────────────────────────────────── + + #[test] + fn rollback_info_has_strategy() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(!pipeline.rollback_info.strategy.is_empty()); + } + + #[test] + fn rollback_info_has_command_hint() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(!pipeline.rollback_info.command_hint.is_empty()); + } + + // ── Token resolution ────────────────────────────────────────────────── + + #[test] + fn tokens_resolved_after_resolution() { + let ctx = make_context(CdPlatform::Azure, DeployTarget::AppService); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + // After resolution, unresolved tokens should be minimal + } + + // ── Multi-platform consistency 
──────────────────────────────────────── + + #[test] + fn all_platforms_produce_valid_yaml() { + let platforms = [ + (CdPlatform::Azure, DeployTarget::AppService), + (CdPlatform::Gcp, DeployTarget::CloudRun), + (CdPlatform::Hetzner, DeployTarget::Vps), + ]; + + for (platform, target) in &platforms { + let ctx = make_context(platform.clone(), target.clone()); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + let yaml = match platform { + CdPlatform::Azure => templates::azure::render(&pipeline), + CdPlatform::Gcp => templates::gcp::render(&pipeline), + CdPlatform::Hetzner => templates::hetzner::render(&pipeline), + }; + assert!(yaml.contains("name:"), "Missing 'name:' for {:?}", platform); + assert!(yaml.contains("on:"), "Missing 'on:' for {:?}", platform); + assert!(yaml.contains("jobs:"), "Missing 'jobs:' for {:?}", platform); + assert_no_hardcoded_secrets(&yaml); + } + } +} + +#[cfg(test)] +mod cd_cross_linking_tests { + use crate::generator::cd_generation::{ + environments::{generate_environment_jobs, render_environment_jobs_yaml}, + rollback::{generate_rollback_script}, + versioning::{compute_image_tags, render_versioning_env_block, render_tag_resolution_step}, + dispatch::{generate_dispatch_inputs, render_dispatch_yaml}, + notification::{generate_notification_step, render_notification_yaml}, + terraform_step::{generate_terraform_step, render_terraform_yaml}, + reusable_workflow::{render_reusable_base, render_caller_job}, + context::{CdPlatform, DeployTarget}, + schema::{EnvironmentConfig, RollbackInfo}, + }; + + // ── Environment → dispatch consistency ──────────────────────────────── + + #[test] + fn dispatch_inputs_match_default_environments() { + let dispatch = generate_dispatch_inputs(&[]); + let env_input = &dispatch[1]; + if let crate::generator::cd_generation::dispatch::DispatchInputType::Choice { options } = + &env_input.input_type + { + // default dispatch options = development, staging, production + 
assert!(options.contains(&"development".to_string())); + assert!(options.contains(&"staging".to_string())); + assert!(options.contains(&"production".to_string())); + } + } + + #[test] + fn custom_environments_flow_to_dispatch() { + let envs = vec!["dev".to_string(), "prod".to_string()]; + let dispatch = generate_dispatch_inputs(&envs); + let env_input = &dispatch[1]; + if let crate::generator::cd_generation::dispatch::DispatchInputType::Choice { options } = + &env_input.input_type + { + assert_eq!(options.len(), 2); + } + } + + // ── Versioning + notification YAML composability ────────────────────── + + #[test] + fn versioning_env_block_combines_with_notification() { + let tags = compute_image_tags("ghcr.io", "my-app"); + let env_block = render_versioning_env_block(&tags); + let notif_step = generate_notification_step("SLACK_WEBHOOK_URL", true, true); + let notif_yaml = render_notification_yaml(¬if_step); + + // Both should be valid YAML fragments that can be placed in the same file + assert!(env_block.contains("IMAGE_TAG")); + assert!(notif_yaml.contains("Notify Slack")); + } + + // ── Terraform + rollback consistency ────────────────────────────────── + + #[test] + fn terraform_yaml_and_rollback_both_reference_image_tag() { + let tf_step = generate_terraform_step("terraform", false); + let tf_yaml = render_terraform_yaml(&tf_step, "main"); + assert!(tf_yaml.contains("IMAGE_TAG"), "Terraform should reference IMAGE_TAG"); + + let rollback_info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp deployment slot swap".to_string(), + }; + let rollback_script = generate_rollback_script( + &CdPlatform::Azure, + &DeployTarget::AppService, + &rollback_info, + ); + assert!(!rollback_script.is_empty(), "Rollback script should not be empty"); + } + + // ── Reusable workflow + environment integration ─────────────────────── + + #[test] + fn reusable_base_renders_for_all_platforms() { + for platform in &[CdPlatform::Azure, CdPlatform::Gcp, 
CdPlatform::Hetzner] { + let target = match platform { + CdPlatform::Azure => DeployTarget::AppService, + CdPlatform::Gcp => DeployTarget::CloudRun, + CdPlatform::Hetzner => DeployTarget::Vps, + }; + let base = render_reusable_base(platform, &target, "my-app"); + assert!(base.contains("workflow_call"), "Missing workflow_call for {:?}", platform); + } + } + + #[test] + fn caller_job_references_environment() { + let caller = render_caller_job("staging", "${{ env.IMAGE_TAG }}", Some("build")); + assert!(caller.contains("staging")); + assert!(caller.contains("IMAGE_TAG")); + } + + // ── Environment jobs generate yaml ──────────────────────────────────── + + #[test] + fn environment_jobs_render_correct_yaml() { + let envs = vec![ + EnvironmentConfig { + name: "staging".to_string(), + branch_filter: None, + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }, + EnvironmentConfig { + name: "production".to_string(), + branch_filter: None, + requires_approval: true, + app_url: None, + namespace: None, + replicas: None, + }, + ]; + let jobs = generate_environment_jobs(&envs); + assert_eq!(jobs.len(), 2); + let yaml = render_environment_jobs_yaml(&jobs); + assert!(yaml.contains("staging")); + assert!(yaml.contains("production")); + } + + // ── Dispatch yaml renders ───────────────────────────────────────────── + + #[test] + fn full_dispatch_yaml_renders() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("workflow_dispatch:")); + assert!(yaml.contains("image_tag:")); + assert!(yaml.contains("environment:")); + assert!(yaml.contains("dry_run:")); + } + + // ── Tag resolution step is valid ────────────────────────────────────── + + #[test] + fn tag_resolution_step_yaml() { + let step = render_tag_resolution_step(); + assert!(step.contains("Compute image tags")); + assert!(step.contains("GITHUB_OUTPUT")); + } +} diff --git a/src/generator/cd_generation/context.rs 
b/src/generator/cd_generation/context.rs new file mode 100644 index 00000000..9c2c3bd8 --- /dev/null +++ b/src/generator/cd_generation/context.rs @@ -0,0 +1,679 @@ +//! CD-02 — `CdContext` and `collect_cd_context` entry point. +//! +//! Captures everything needed to build a CD pipeline skeleton. The context +//! collector calls the existing `ProjectAnalysis` and enriches it with +//! deployment-specific detection: Terraform directories, K8s manifests, +//! Helm charts, database migration tools, and health check paths. + +use std::path::{Path, PathBuf}; +use std::process::Command; + +use serde::Serialize; + +use crate::analyzer::{analyze_project, ProjectAnalysis}; + +// ── Platform & target enums ────────────────────────────────────────────────── + +/// Cloud platform for CD deployment. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum CdPlatform { + Azure, + Gcp, + Hetzner, +} + +/// Concrete deployment target within a platform. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum DeployTarget { + /// Azure App Service (PaaS) + AppService, + /// Azure Kubernetes Service + Aks, + /// Azure Container Apps + ContainerApps, + /// Google Cloud Run (serverless containers) + CloudRun, + /// Google Kubernetes Engine + Gke, + /// Hetzner VPS via SSH + Docker Compose + Vps, + /// Hetzner-managed Kubernetes (hcloud) + HetznerK8s, + /// Coolify self-hosted PaaS on Hetzner + Coolify, +} + +impl std::fmt::Display for DeployTarget { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + Self::AppService => "app-service", + Self::Aks => "aks", + Self::ContainerApps => "container-apps", + Self::CloudRun => "cloud-run", + Self::Gke => "gke", + Self::Vps => "vps", + Self::HetznerK8s => "hetzner-k8s", + Self::Coolify => "coolify", + }; + write!(f, "{}", s) + } +} + +/// Container registry type. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum Registry { + /// Azure Container Registry + Acr, + /// Google Artifact Registry + Gar, + /// GitHub Container Registry + Ghcr, + /// User-provided custom registry URL + Custom(String), +} + +impl std::fmt::Display for Registry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Acr => write!(f, "acr"), + Self::Gar => write!(f, "gar"), + Self::Ghcr => write!(f, "ghcr"), + Self::Custom(url) => write!(f, "custom({})", url), + } + } +} + +/// Database migration tool detected in the project. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum MigrationTool { + Flyway, + Liquibase, + Alembic, + DjangoMigrations, + Prisma, + Sqlx, + Diesel, +} + +impl std::fmt::Display for MigrationTool { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + Self::Flyway => "flyway", + Self::Liquibase => "liquibase", + Self::Alembic => "alembic", + Self::DjangoMigrations => "django", + Self::Prisma => "prisma", + Self::Sqlx => "sqlx", + Self::Diesel => "diesel", + }; + write!(f, "{}", s) + } +} + +/// A deployment environment (dev, staging, production). +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct Environment { + pub name: String, + /// Whether this environment requires manual approval before deploy. + pub requires_approval: bool, +} + +// ── Primary struct ──────────────────────────────────────────────────────────── + +/// Enriched snapshot of a project consumed by all CD generators. +#[derive(Debug, Clone, Serialize)] +pub struct CdContext { + /// Raw analyzer output; available to generators that need additional fields. + pub analysis: ProjectAnalysis, + /// Human-readable project name (from Cargo.toml, package.json, or dir name). + pub project_name: String, + /// Target cloud platform. + pub platform: CdPlatform, + /// Concrete deployment target within the platform. 
+ pub deploy_target: DeployTarget, + /// Ordered list of deployment environments. + pub environments: Vec, + /// Container registry to push images to. + pub registry: Registry, + /// Docker image name (without registry prefix or tag). + pub image_name: String, + /// Whether a Terraform directory was detected. + pub has_terraform: bool, + /// Path to the Terraform directory, if detected. + pub terraform_dir: Option, + /// Whether Kubernetes manifest files were detected. + pub has_k8s_manifests: bool, + /// Path to the Kubernetes manifest directory, if detected. + pub k8s_manifest_dir: Option, + /// Whether a Helm chart was detected. + pub has_helm_chart: bool, + /// Path to the Helm chart directory, if detected. + pub helm_chart_dir: Option, + /// Database migration tool detected, if any. + pub migration_tool: Option, + /// Custom migration command from `.syncable.cd.toml`, overrides the + /// tool-derived default when set. + pub migration_command_override: Option, + /// Health check endpoint path (e.g. `/health`, `/healthz`). + pub health_check_path: Option, + /// Default git branch name. + pub default_branch: String, + /// Whether the project has a Dockerfile. + pub has_dockerfile: bool, +} + +// ── Detection helpers ───────────────────────────────────────────────────────── + +/// Detect the project name from the analysis metadata. +fn detect_project_name(analysis: &ProjectAnalysis) -> String { + analysis + .project_root + .file_name() + .map(|n| n.to_string_lossy().into_owned()) + .unwrap_or_else(|| "project".to_string()) +} + +/// Returns the upstream default branch via `git symbolic-ref`; falls back to `"main"`. 
+fn detect_default_branch(path: &Path) -> String { + let output = Command::new("git") + .args(["symbolic-ref", "refs/remotes/origin/HEAD"]) + .current_dir(path) + .output(); + + match output { + Ok(out) if out.status.success() => { + let raw = String::from_utf8_lossy(&out.stdout); + raw.trim() + .rsplit('/') + .next() + .unwrap_or("main") + .to_string() + } + _ => "main".to_string(), + } +} + +/// Detect a Terraform directory at the project root. +fn detect_terraform(root: &Path) -> Option { + // Check common Terraform directory names + let candidates = ["terraform", "tf", "infra", "infrastructure"]; + for name in &candidates { + let dir = root.join(name); + if dir.is_dir() && has_tf_files(&dir) { + return Some(dir); + } + } + // Check root for main.tf + if root.join("main.tf").exists() { + return Some(root.to_path_buf()); + } + None +} + +/// Returns true if the directory contains `.tf` files. +fn has_tf_files(dir: &Path) -> bool { + std::fs::read_dir(dir) + .into_iter() + .flatten() + .flatten() + .any(|e| { + e.path() + .extension() + .map(|ext| ext == "tf") + .unwrap_or(false) + }) +} + +/// Detect Kubernetes manifest directories. +fn detect_k8s_manifests(root: &Path) -> Option { + let candidates = ["k8s", "kubernetes", "manifests", "deploy", "kube"]; + for name in &candidates { + let dir = root.join(name); + if dir.is_dir() && has_k8s_yamls(&dir) { + return Some(dir); + } + } + None +} + +/// Returns true if the directory contains YAML files with `apiVersion:` and `kind:`. +fn has_k8s_yamls(dir: &Path) -> bool { + std::fs::read_dir(dir) + .into_iter() + .flatten() + .flatten() + .any(|e| { + let p = e.path(); + let is_yaml = p + .extension() + .map(|ext| ext == "yml" || ext == "yaml") + .unwrap_or(false); + if !is_yaml { + return false; + } + std::fs::read_to_string(&p) + .map(|content| content.contains("apiVersion:") && content.contains("kind:")) + .unwrap_or(false) + }) +} + +/// Detect a Helm chart directory. 
+fn detect_helm_chart(root: &Path) -> Option { + // Chart.yaml at root + if root.join("Chart.yaml").exists() { + return Some(root.to_path_buf()); + } + // Common chart subdirectories + let candidates = ["chart", "helm", "charts"]; + for name in &candidates { + let dir = root.join(name); + if dir.join("Chart.yaml").exists() { + return Some(dir); + } + } + None +} + +/// Detect database migration tool from project file markers. +fn detect_migration_tool(root: &Path) -> Option { + // Prisma — schema.prisma + migrations directory + if root.join("prisma").join("schema.prisma").exists() + || root.join("schema.prisma").exists() + { + return Some(MigrationTool::Prisma); + } + // Diesel — diesel.toml + if root.join("diesel.toml").exists() { + return Some(MigrationTool::Diesel); + } + // sqlx — sqlx-data.json or .sqlx directory + if root.join("sqlx-data.json").exists() || root.join(".sqlx").is_dir() { + return Some(MigrationTool::Sqlx); + } + // Alembic — alembic.ini + if root.join("alembic.ini").exists() { + return Some(MigrationTool::Alembic); + } + // Django — manage.py (with migrations directory somewhere) + if root.join("manage.py").exists() { + return Some(MigrationTool::DjangoMigrations); + } + // Flyway — flyway.conf or db/migration directory + if root.join("flyway.conf").exists() + || root.join("db").join("migration").is_dir() + { + return Some(MigrationTool::Flyway); + } + // Liquibase — liquibase.properties + if root.join("liquibase.properties").exists() { + return Some(MigrationTool::Liquibase); + } + None +} + +/// Detect health check endpoint by scanning for common route patterns in source files. 
+fn detect_health_check_path(root: &Path) -> Option { + // Check common locations for health endpoint definitions + let src_dirs = ["src", "app", "server", "api", "lib"]; + + for dir_name in &src_dirs { + let dir = root.join(dir_name); + if !dir.is_dir() { + continue; + } + if let Some(path) = scan_dir_for_health_route(&dir, 0) { + return Some(path); + } + } + // Also check root-level files (e.g. main.py, app.py, server.js) + scan_dir_for_health_route(root, 0) +} + +/// Recursively scan source files for health endpoint route definitions. +fn scan_dir_for_health_route(dir: &Path, depth: usize) -> Option { + if depth > 3 { + return None; + } + let entries = match std::fs::read_dir(dir) { + Ok(e) => e, + Err(_) => return None, + }; + for entry in entries.flatten() { + let path = entry.path(); + if path.is_dir() { + let name = path.file_name().and_then(|n| n.to_str()).unwrap_or(""); + if !name.starts_with('.') + && name != "node_modules" + && name != "target" + && let Some(found) = scan_dir_for_health_route(&path, depth + 1) + { + return Some(found); + } + } else if path.is_file() + && let Some(found) = check_file_for_health_route(&path) + { + return Some(found); + } + } + None +} + +/// Check a single file for common health endpoint patterns. +fn check_file_for_health_route(path: &Path) -> Option { + let ext = path.extension().and_then(|e| e.to_str()).unwrap_or(""); + match ext { + "rs" | "py" | "js" | "ts" | "go" | "java" | "kt" => {} + _ => return None, + } + let content = std::fs::read_to_string(path).ok()?; + // Check for common health route patterns + let patterns = [ + "/healthz", + "/health", + "/api/health", + "/api/healthz", + "/_health", + ]; + for pattern in &patterns { + if content.contains(pattern) { + return Some(pattern.to_string()); + } + } + None +} + +/// Default registry for a platform. 
+fn default_registry(platform: &CdPlatform) -> Registry { + match platform { + CdPlatform::Azure => Registry::Acr, + CdPlatform::Gcp => Registry::Gar, + CdPlatform::Hetzner => Registry::Ghcr, + } +} + +/// Default deploy target for a platform. +fn default_deploy_target(platform: &CdPlatform) -> DeployTarget { + match platform { + CdPlatform::Azure => DeployTarget::AppService, + CdPlatform::Gcp => DeployTarget::CloudRun, + CdPlatform::Hetzner => DeployTarget::Vps, + } +} + +/// Default environments when none are specified. +fn default_environments() -> Vec { + vec![ + Environment { + name: "staging".to_string(), + requires_approval: false, + }, + Environment { + name: "production".to_string(), + requires_approval: true, + }, + ] +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Runs the project analyzer and assembles a `CdContext` for the given path. +/// +/// The `deploy_target` and `environments` parameters are optional; sensible +/// defaults are provided based on the chosen platform. 
+pub fn collect_cd_context( + path: &Path, + platform: CdPlatform, + deploy_target: Option, + environments: Option>, + registry_override: Option, + image_name_override: Option, +) -> crate::Result { + let analysis = analyze_project(path)?; + + let project_name = detect_project_name(&analysis); + let default_branch = detect_default_branch(path); + let has_dockerfile = analysis.docker_analysis.is_some(); + + let root = &analysis.project_root; + + // Detect infrastructure + let terraform_dir = detect_terraform(root); + let has_terraform = terraform_dir.is_some(); + + let k8s_manifest_dir = detect_k8s_manifests(root); + let has_k8s_manifests = k8s_manifest_dir.is_some(); + + let helm_chart_dir = detect_helm_chart(root); + let has_helm_chart = helm_chart_dir.is_some(); + + // Detect migration tool + let migration_tool = detect_migration_tool(root); + + // Detect health check path + let health_check_path = detect_health_check_path(root); + + // Resolve defaults + let deploy_target = deploy_target.unwrap_or_else(|| default_deploy_target(&platform)); + let environments = environments.unwrap_or_else(default_environments); + let registry = registry_override.unwrap_or_else(|| default_registry(&platform)); + let image_name = image_name_override.unwrap_or_else(|| project_name.clone()); + + Ok(CdContext { + analysis, + project_name, + platform, + deploy_target, + environments, + registry, + image_name, + has_terraform, + terraform_dir, + has_k8s_manifests, + k8s_manifest_dir, + has_helm_chart, + helm_chart_dir, + migration_tool, + migration_command_override: None, + health_check_path, + default_branch, + has_dockerfile, + }) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn test_default_registry_per_platform() { + assert_eq!(default_registry(&CdPlatform::Azure), Registry::Acr); + assert_eq!(default_registry(&CdPlatform::Gcp), Registry::Gar); + 
assert_eq!(default_registry(&CdPlatform::Hetzner), Registry::Ghcr); + } + + #[test] + fn test_default_deploy_target_per_platform() { + assert_eq!(default_deploy_target(&CdPlatform::Azure), DeployTarget::AppService); + assert_eq!(default_deploy_target(&CdPlatform::Gcp), DeployTarget::CloudRun); + assert_eq!(default_deploy_target(&CdPlatform::Hetzner), DeployTarget::Vps); + } + + #[test] + fn test_default_environments() { + let envs = default_environments(); + assert_eq!(envs.len(), 2); + assert_eq!(envs[0].name, "staging"); + assert!(!envs[0].requires_approval); + assert_eq!(envs[1].name, "production"); + assert!(envs[1].requires_approval); + } + + #[test] + fn test_detect_terraform_main_tf_at_root() { + let dir = TempDir::new().unwrap(); + std::fs::write(dir.path().join("main.tf"), "resource {}").unwrap(); + + let result = detect_terraform(dir.path()); + assert_eq!(result, Some(dir.path().to_path_buf())); + } + + #[test] + fn test_detect_terraform_in_subdir() { + let dir = TempDir::new().unwrap(); + let tf_dir = dir.path().join("terraform"); + std::fs::create_dir(&tf_dir).unwrap(); + std::fs::write(tf_dir.join("main.tf"), "resource {}").unwrap(); + + let result = detect_terraform(dir.path()); + assert_eq!(result, Some(tf_dir)); + } + + #[test] + fn test_detect_terraform_none() { + let dir = TempDir::new().unwrap(); + assert_eq!(detect_terraform(dir.path()), None); + } + + #[test] + fn test_detect_k8s_manifests() { + let dir = TempDir::new().unwrap(); + let k8s_dir = dir.path().join("k8s"); + std::fs::create_dir(&k8s_dir).unwrap(); + std::fs::write( + k8s_dir.join("deployment.yaml"), + "apiVersion: apps/v1\nkind: Deployment\n", + ) + .unwrap(); + + let result = detect_k8s_manifests(dir.path()); + assert_eq!(result, Some(k8s_dir)); + } + + #[test] + fn test_detect_k8s_manifests_none() { + let dir = TempDir::new().unwrap(); + assert_eq!(detect_k8s_manifests(dir.path()), None); + } + + #[test] + fn test_detect_helm_chart_at_root() { + let dir = TempDir::new().unwrap(); 
+ std::fs::write(dir.path().join("Chart.yaml"), "name: my-chart").unwrap(); + + let result = detect_helm_chart(dir.path()); + assert_eq!(result, Some(dir.path().to_path_buf())); + } + + #[test] + fn test_detect_helm_chart_in_subdir() { + let dir = TempDir::new().unwrap(); + let chart_dir = dir.path().join("chart"); + std::fs::create_dir(&chart_dir).unwrap(); + std::fs::write(chart_dir.join("Chart.yaml"), "name: my-chart").unwrap(); + + let result = detect_helm_chart(dir.path()); + assert_eq!(result, Some(chart_dir)); + } + + #[test] + fn test_detect_migration_prisma() { + let dir = TempDir::new().unwrap(); + let prisma_dir = dir.path().join("prisma"); + std::fs::create_dir(&prisma_dir).unwrap(); + std::fs::write(prisma_dir.join("schema.prisma"), "model User {}").unwrap(); + + assert_eq!(detect_migration_tool(dir.path()), Some(MigrationTool::Prisma)); + } + + #[test] + fn test_detect_migration_diesel() { + let dir = TempDir::new().unwrap(); + std::fs::write(dir.path().join("diesel.toml"), "[print_schema]").unwrap(); + + assert_eq!(detect_migration_tool(dir.path()), Some(MigrationTool::Diesel)); + } + + #[test] + fn test_detect_migration_alembic() { + let dir = TempDir::new().unwrap(); + std::fs::write(dir.path().join("alembic.ini"), "[alembic]").unwrap(); + + assert_eq!(detect_migration_tool(dir.path()), Some(MigrationTool::Alembic)); + } + + #[test] + fn test_detect_migration_none() { + let dir = TempDir::new().unwrap(); + assert_eq!(detect_migration_tool(dir.path()), None); + } + + #[test] + fn test_detect_health_check_in_source() { + let dir = TempDir::new().unwrap(); + let src = dir.path().join("src"); + std::fs::create_dir(&src).unwrap(); + std::fs::write( + src.join("main.rs"), + r#"fn main() { router.get("/health", health_handler); }"#, + ) + .unwrap(); + + let result = detect_health_check_path(dir.path()); + assert_eq!(result, Some("/health".to_string())); + } + + #[test] + fn test_detect_health_check_healthz() { + let dir = TempDir::new().unwrap(); + let 
src = dir.path().join("src"); + std::fs::create_dir(&src).unwrap(); + std::fs::write( + src.join("app.py"), + r#"@app.get("/healthz") def healthz(): return "ok""#, + ) + .unwrap(); + + let result = detect_health_check_path(dir.path()); + // /healthz is checked before /health + assert_eq!(result, Some("/healthz".to_string())); + } + + #[test] + fn test_detect_health_check_none() { + let dir = TempDir::new().unwrap(); + let src = dir.path().join("src"); + std::fs::create_dir(&src).unwrap(); + std::fs::write(src.join("main.rs"), "fn main() {}").unwrap(); + + assert_eq!(detect_health_check_path(dir.path()), None); + } + + #[test] + fn test_deploy_target_display() { + assert_eq!(DeployTarget::AppService.to_string(), "app-service"); + assert_eq!(DeployTarget::CloudRun.to_string(), "cloud-run"); + assert_eq!(DeployTarget::Vps.to_string(), "vps"); + assert_eq!(DeployTarget::HetznerK8s.to_string(), "hetzner-k8s"); + } + + #[test] + fn test_registry_display() { + assert_eq!(Registry::Acr.to_string(), "acr"); + assert_eq!(Registry::Gar.to_string(), "gar"); + assert_eq!(Registry::Ghcr.to_string(), "ghcr"); + assert_eq!( + Registry::Custom("my.registry.io".to_string()).to_string(), + "custom(my.registry.io)" + ); + } + + #[test] + fn test_migration_tool_display() { + assert_eq!(MigrationTool::Prisma.to_string(), "prisma"); + assert_eq!(MigrationTool::Diesel.to_string(), "diesel"); + assert_eq!(MigrationTool::Flyway.to_string(), "flyway"); + } +} diff --git a/src/generator/cd_generation/deploy_azure.rs b/src/generator/cd_generation/deploy_azure.rs new file mode 100644 index 00000000..79ae8a87 --- /dev/null +++ b/src/generator/cd_generation/deploy_azure.rs @@ -0,0 +1,326 @@ +//! CD-07 — Azure Deploy Step Generator +//! +//! Generates GitHub Actions YAML snippets for Azure deployment targets: +//! +//! | Target | Action | Key params | +//! |-----------------|--------------------------------------|-------------------------------| +//! 
| App Service | `azure/webapps-deploy@v3` | `app-name`, `images` | +//! | AKS | `azure/k8s-deploy@v5` | `namespace`, `manifests` | +//! | Container Apps | `azure/container-apps-deploy@v2` | `containerAppName`, `image` | +//! +//! Each function returns a `DeployStep` for the schema and a YAML snippet +//! string for direct embedding. Rollback hints are also provided per target. + +use super::context::DeployTarget; +use super::schema::{DeployStep, RollbackInfo}; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the deploy step for the given Azure target. +pub fn generate_azure_deploy(target: &DeployTarget, image_tag: &str) -> DeployStep { + match target { + DeployTarget::AppService => DeployStep { + strategy: "rolling".to_string(), + command: "azure/webapps-deploy@v3".to_string(), + args: vec![ + "app-name={{APP_NAME}}".to_string(), + format!("images={image_tag}"), + ], + target: target.clone(), + }, + DeployTarget::Aks => DeployStep { + strategy: "rolling".to_string(), + command: "azure/k8s-deploy@v5".to_string(), + args: vec![ + "namespace={{K8S_NAMESPACE}}".to_string(), + "manifests={{K8S_MANIFEST_DIR}}".to_string(), + format!("images={image_tag}"), + ], + target: target.clone(), + }, + DeployTarget::ContainerApps => DeployStep { + strategy: "rolling".to_string(), + command: "azure/container-apps-deploy@v2".to_string(), + args: vec![ + "containerAppName={{APP_NAME}}".to_string(), + "resourceGroup={{RESOURCE_GROUP}}".to_string(), + format!("imageToDeploy={image_tag}"), + ], + target: target.clone(), + }, + // Non-Azure targets should not reach here; return a sensible fallback. + other => DeployStep { + strategy: "rolling".to_string(), + command: format!("echo 'Unsupported Azure target: {other}'"), + args: vec![], + target: other.clone(), + }, + } +} + +/// Generates rollback info for the given Azure target. 
+pub fn azure_rollback_info(target: &DeployTarget) -> RollbackInfo { + match target { + DeployTarget::AppService => RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp deployment slot swap --resource-group {{RESOURCE_GROUP}} --name {{APP_NAME}} --slot staging --target-slot production".to_string(), + }, + DeployTarget::Aks => RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "kubectl rollout undo deployment/{{DEPLOYMENT_NAME}} -n {{K8S_NAMESPACE}}".to_string(), + }, + DeployTarget::ContainerApps => RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az containerapp revision activate --name {{APP_NAME}} --resource-group {{RESOURCE_GROUP}} --revision ".to_string(), + }, + _ => RollbackInfo { + strategy: "manual".to_string(), + command_hint: "Manually redeploy the previous version".to_string(), + }, + } +} + +/// Renders the App Service deploy step as a GitHub Actions YAML snippet. +pub fn render_app_service_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Deploy to Azure App Service + uses: azure/webapps-deploy@v3 + with: + app-name: ${{{{ secrets.APP_NAME }}}} + images: {image_tag}\n" + ) +} + +/// Renders the AKS deploy step as a GitHub Actions YAML snippet. +pub fn render_aks_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Set AKS context + uses: azure/aks-set-context@v4 + with: + resource-group: ${{{{ secrets.RESOURCE_GROUP }}}} + cluster-name: ${{{{ secrets.AKS_CLUSTER_NAME }}}} + + - name: Deploy to AKS + uses: azure/k8s-deploy@v5 + with: + namespace: ${{{{ secrets.K8S_NAMESPACE }}}} + manifests: | + ${{{{ secrets.K8S_MANIFEST_DIR }}}}/deployment.yaml + ${{{{ secrets.K8S_MANIFEST_DIR }}}}/service.yaml + images: {image_tag}\n" + ) +} + +/// Renders the Container Apps deploy step as a GitHub Actions YAML snippet. 
+pub fn render_container_apps_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Deploy to Azure Container Apps + uses: azure/container-apps-deploy@v2 + with: + containerAppName: ${{{{ secrets.APP_NAME }}}} + resourceGroup: ${{{{ secrets.RESOURCE_GROUP }}}} + imageToDeploy: {image_tag}\n" + ) +} + +/// Renders the deploy YAML snippet for any Azure target. +pub fn render_azure_deploy_yaml(target: &DeployTarget, image_tag: &str) -> String { + match target { + DeployTarget::AppService => render_app_service_deploy_yaml(image_tag), + DeployTarget::Aks => render_aks_deploy_yaml(image_tag), + DeployTarget::ContainerApps => render_container_apps_deploy_yaml(image_tag), + _ => format!(" - name: Deploy\n run: echo 'Unsupported Azure target'\n"), + } +} + +/// Returns secrets required for the Azure deploy target. +pub fn azure_deploy_required_secrets(target: &DeployTarget) -> Vec { + match target { + DeployTarget::AppService => vec![ + "APP_NAME".to_string(), + ], + DeployTarget::Aks => vec![ + "RESOURCE_GROUP".to_string(), + "AKS_CLUSTER_NAME".to_string(), + "K8S_NAMESPACE".to_string(), + "K8S_MANIFEST_DIR".to_string(), + ], + DeployTarget::ContainerApps => vec![ + "APP_NAME".to_string(), + "RESOURCE_GROUP".to_string(), + ], + _ => vec![], + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + const IMAGE: &str = "myacr.azurecr.io/app:sha123"; + + // ── generate_azure_deploy ───────────────────────────────────────── + + #[test] + fn app_service_deploy_step_uses_correct_action() { + let step = generate_azure_deploy(&DeployTarget::AppService, IMAGE); + assert_eq!(step.command, "azure/webapps-deploy@v3"); + } + + #[test] + fn app_service_deploy_step_strategy_is_rolling() { + let step = generate_azure_deploy(&DeployTarget::AppService, IMAGE); + assert_eq!(step.strategy, "rolling"); + } + + #[test] + fn app_service_deploy_step_contains_app_name_placeholder() { + let step = 
generate_azure_deploy(&DeployTarget::AppService, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{APP_NAME}}"))); + } + + #[test] + fn app_service_deploy_step_contains_image_tag() { + let step = generate_azure_deploy(&DeployTarget::AppService, IMAGE); + assert!(step.args.iter().any(|a| a.contains(IMAGE))); + } + + #[test] + fn aks_deploy_step_uses_correct_action() { + let step = generate_azure_deploy(&DeployTarget::Aks, IMAGE); + assert_eq!(step.command, "azure/k8s-deploy@v5"); + } + + #[test] + fn aks_deploy_step_contains_namespace_placeholder() { + let step = generate_azure_deploy(&DeployTarget::Aks, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{K8S_NAMESPACE}}"))); + } + + #[test] + fn aks_deploy_step_contains_manifest_dir_placeholder() { + let step = generate_azure_deploy(&DeployTarget::Aks, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{K8S_MANIFEST_DIR}}"))); + } + + #[test] + fn container_apps_deploy_step_uses_correct_action() { + let step = generate_azure_deploy(&DeployTarget::ContainerApps, IMAGE); + assert_eq!(step.command, "azure/container-apps-deploy@v2"); + } + + #[test] + fn container_apps_deploy_step_contains_resource_group() { + let step = generate_azure_deploy(&DeployTarget::ContainerApps, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{RESOURCE_GROUP}}"))); + } + + #[test] + fn container_apps_deploy_step_target_preserved() { + let step = generate_azure_deploy(&DeployTarget::ContainerApps, IMAGE); + assert_eq!(step.target, DeployTarget::ContainerApps); + } + + // ── azure_rollback_info ─────────────────────────────────────────── + + #[test] + fn app_service_rollback_strategy() { + let info = azure_rollback_info(&DeployTarget::AppService); + assert_eq!(info.strategy, "redeploy-previous"); + } + + #[test] + fn app_service_rollback_uses_slot_swap() { + let info = azure_rollback_info(&DeployTarget::AppService); + assert!(info.command_hint.contains("slot swap")); + } + + #[test] + fn aks_rollback_uses_rollout_undo() 
{ + let info = azure_rollback_info(&DeployTarget::Aks); + assert_eq!(info.strategy, "rollout-undo"); + assert!(info.command_hint.contains("rollout undo")); + } + + #[test] + fn container_apps_rollback_activates_previous_revision() { + let info = azure_rollback_info(&DeployTarget::ContainerApps); + assert!(info.command_hint.contains("revision activate")); + } + + // ── render_azure_deploy_yaml ────────────────────────────────────── + + #[test] + fn app_service_yaml_contains_action() { + let yaml = render_azure_deploy_yaml(&DeployTarget::AppService, IMAGE); + assert!(yaml.contains("azure/webapps-deploy@v3")); + } + + #[test] + fn app_service_yaml_contains_image() { + let yaml = render_azure_deploy_yaml(&DeployTarget::AppService, IMAGE); + assert!(yaml.contains(IMAGE)); + } + + #[test] + fn app_service_yaml_references_app_name_secret() { + let yaml = render_azure_deploy_yaml(&DeployTarget::AppService, IMAGE); + assert!(yaml.contains("secrets.APP_NAME")); + } + + #[test] + fn aks_yaml_contains_k8s_deploy_action() { + let yaml = render_azure_deploy_yaml(&DeployTarget::Aks, IMAGE); + assert!(yaml.contains("azure/k8s-deploy@v5")); + } + + #[test] + fn aks_yaml_contains_set_context() { + let yaml = render_azure_deploy_yaml(&DeployTarget::Aks, IMAGE); + assert!(yaml.contains("azure/aks-set-context@v4")); + } + + #[test] + fn aks_yaml_references_cluster_name() { + let yaml = render_azure_deploy_yaml(&DeployTarget::Aks, IMAGE); + assert!(yaml.contains("secrets.AKS_CLUSTER_NAME")); + } + + #[test] + fn container_apps_yaml_contains_action() { + let yaml = render_azure_deploy_yaml(&DeployTarget::ContainerApps, IMAGE); + assert!(yaml.contains("azure/container-apps-deploy@v2")); + } + + #[test] + fn container_apps_yaml_references_resource_group() { + let yaml = render_azure_deploy_yaml(&DeployTarget::ContainerApps, IMAGE); + assert!(yaml.contains("secrets.RESOURCE_GROUP")); + } + + // ── azure_deploy_required_secrets ───────────────────────────────── + + #[test] + fn 
app_service_requires_app_name() { + let secrets = azure_deploy_required_secrets(&DeployTarget::AppService); + assert!(secrets.contains(&"APP_NAME".to_string())); + } + + #[test] + fn aks_requires_four_secrets() { + let secrets = azure_deploy_required_secrets(&DeployTarget::Aks); + assert_eq!(secrets.len(), 4); + } + + #[test] + fn container_apps_requires_app_name_and_resource_group() { + let secrets = azure_deploy_required_secrets(&DeployTarget::ContainerApps); + assert!(secrets.contains(&"APP_NAME".to_string())); + assert!(secrets.contains(&"RESOURCE_GROUP".to_string())); + } +} diff --git a/src/generator/cd_generation/deploy_gcp.rs b/src/generator/cd_generation/deploy_gcp.rs new file mode 100644 index 00000000..e9e0d3aa --- /dev/null +++ b/src/generator/cd_generation/deploy_gcp.rs @@ -0,0 +1,281 @@ +//! CD-08 — GCP Deploy Step Generator +//! +//! Generates GitHub Actions YAML snippets for GCP deployment targets: +//! +//! | Target | Action / Command | Key params | +//! |------------|--------------------------------------------------|-------------------------| +//! | Cloud Run | `google-github-actions/deploy-cloudrun@v2` | `service`, `image` | +//! | GKE | `google-github-actions/get-gke-credentials@v2` | `cluster_name`, kubectl | +//! +//! Each function returns a `DeployStep` for the schema and a YAML snippet +//! string for direct embedding. Rollback hints are also provided per target. + +use super::context::DeployTarget; +use super::schema::{DeployStep, RollbackInfo}; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the deploy step for the given GCP target. 
+pub fn generate_gcp_deploy(target: &DeployTarget, image_tag: &str) -> DeployStep { + match target { + DeployTarget::CloudRun => DeployStep { + strategy: "rolling".to_string(), + command: "google-github-actions/deploy-cloudrun@v2".to_string(), + args: vec![ + "service={{CLOUD_RUN_SERVICE}}".to_string(), + format!("image={image_tag}"), + "region={{GCP_REGION}}".to_string(), + ], + target: target.clone(), + }, + DeployTarget::Gke => DeployStep { + strategy: "rolling".to_string(), + command: "kubectl set image".to_string(), + args: vec![ + "deployment/{{DEPLOYMENT_NAME}}".to_string(), + format!("app={image_tag}"), + "--namespace={{K8S_NAMESPACE}}".to_string(), + ], + target: target.clone(), + }, + other => DeployStep { + strategy: "rolling".to_string(), + command: format!("echo 'Unsupported GCP target: {other}'"), + args: vec![], + target: other.clone(), + }, + } +} + +/// Generates rollback info for the given GCP target. +pub fn gcp_rollback_info(target: &DeployTarget) -> RollbackInfo { + match target { + DeployTarget::CloudRun => RollbackInfo { + strategy: "traffic-shift".to_string(), + command_hint: "gcloud run services update-traffic {{CLOUD_RUN_SERVICE}} --region={{GCP_REGION}} --to-revisions=LATEST=0,=100".to_string(), + }, + DeployTarget::Gke => RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "kubectl rollout undo deployment/{{DEPLOYMENT_NAME}} -n {{K8S_NAMESPACE}}".to_string(), + }, + _ => RollbackInfo { + strategy: "manual".to_string(), + command_hint: "Manually redeploy the previous version".to_string(), + }, + } +} + +/// Renders the Cloud Run deploy step as a GitHub Actions YAML snippet. 
+pub fn render_cloud_run_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Deploy to Cloud Run + uses: google-github-actions/deploy-cloudrun@v2 + with: + service: ${{{{ secrets.CLOUD_RUN_SERVICE }}}} + image: {image_tag} + region: ${{{{ secrets.GCP_REGION }}}}\n" + ) +} + +/// Renders the GKE deploy steps as a GitHub Actions YAML snippet. +/// +/// Emits two steps: get GKE credentials, then kubectl set image. +pub fn render_gke_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Get GKE credentials + uses: google-github-actions/get-gke-credentials@v2 + with: + cluster_name: ${{{{ secrets.GKE_CLUSTER_NAME }}}} + location: ${{{{ secrets.GCP_REGION }}}} + + - name: Deploy to GKE + run: | + kubectl set image deployment/${{{{ secrets.DEPLOYMENT_NAME }}}} \\ + app={image_tag} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} + kubectl rollout status deployment/${{{{ secrets.DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout=300s\n" + ) +} + +/// Renders the deploy YAML snippet for any GCP target. +pub fn render_gcp_deploy_yaml(target: &DeployTarget, image_tag: &str) -> String { + match target { + DeployTarget::CloudRun => render_cloud_run_deploy_yaml(image_tag), + DeployTarget::Gke => render_gke_deploy_yaml(image_tag), + _ => format!(" - name: Deploy\n run: echo 'Unsupported GCP target'\n"), + } +} + +/// Returns secrets required for the GCP deploy target. 
+pub fn gcp_deploy_required_secrets(target: &DeployTarget) -> Vec { + match target { + DeployTarget::CloudRun => vec![ + "CLOUD_RUN_SERVICE".to_string(), + "GCP_REGION".to_string(), + ], + DeployTarget::Gke => vec![ + "GKE_CLUSTER_NAME".to_string(), + "GCP_REGION".to_string(), + "DEPLOYMENT_NAME".to_string(), + "K8S_NAMESPACE".to_string(), + ], + _ => vec![], + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + const IMAGE: &str = "us-central1-docker.pkg.dev/proj/repo/app:sha123"; + + // ── generate_gcp_deploy ─────────────────────────────────────────── + + #[test] + fn cloud_run_deploy_step_uses_correct_action() { + let step = generate_gcp_deploy(&DeployTarget::CloudRun, IMAGE); + assert_eq!(step.command, "google-github-actions/deploy-cloudrun@v2"); + } + + #[test] + fn cloud_run_deploy_step_strategy_is_rolling() { + let step = generate_gcp_deploy(&DeployTarget::CloudRun, IMAGE); + assert_eq!(step.strategy, "rolling"); + } + + #[test] + fn cloud_run_deploy_step_contains_service_placeholder() { + let step = generate_gcp_deploy(&DeployTarget::CloudRun, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{CLOUD_RUN_SERVICE}}"))); + } + + #[test] + fn cloud_run_deploy_step_contains_region_placeholder() { + let step = generate_gcp_deploy(&DeployTarget::CloudRun, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{GCP_REGION}}"))); + } + + #[test] + fn cloud_run_deploy_step_contains_image() { + let step = generate_gcp_deploy(&DeployTarget::CloudRun, IMAGE); + assert!(step.args.iter().any(|a| a.contains(IMAGE))); + } + + #[test] + fn gke_deploy_step_uses_kubectl() { + let step = generate_gcp_deploy(&DeployTarget::Gke, IMAGE); + assert!(step.command.contains("kubectl")); + } + + #[test] + fn gke_deploy_step_contains_namespace_placeholder() { + let step = generate_gcp_deploy(&DeployTarget::Gke, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{K8S_NAMESPACE}}"))); + } + 
+ #[test] + fn gke_deploy_step_contains_deployment_name_placeholder() { + let step = generate_gcp_deploy(&DeployTarget::Gke, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{DEPLOYMENT_NAME}}"))); + } + + #[test] + fn gke_deploy_step_target_preserved() { + let step = generate_gcp_deploy(&DeployTarget::Gke, IMAGE); + assert_eq!(step.target, DeployTarget::Gke); + } + + // ── gcp_rollback_info ───────────────────────────────────────────── + + #[test] + fn cloud_run_rollback_uses_traffic_shift() { + let info = gcp_rollback_info(&DeployTarget::CloudRun); + assert_eq!(info.strategy, "traffic-shift"); + } + + #[test] + fn cloud_run_rollback_mentions_update_traffic() { + let info = gcp_rollback_info(&DeployTarget::CloudRun); + assert!(info.command_hint.contains("update-traffic")); + } + + #[test] + fn gke_rollback_uses_rollout_undo() { + let info = gcp_rollback_info(&DeployTarget::Gke); + assert_eq!(info.strategy, "rollout-undo"); + assert!(info.command_hint.contains("rollout undo")); + } + + // ── render_gcp_deploy_yaml ──────────────────────────────────────── + + #[test] + fn cloud_run_yaml_contains_action() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::CloudRun, IMAGE); + assert!(yaml.contains("google-github-actions/deploy-cloudrun@v2")); + } + + #[test] + fn cloud_run_yaml_contains_image() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::CloudRun, IMAGE); + assert!(yaml.contains(IMAGE)); + } + + #[test] + fn cloud_run_yaml_references_service_secret() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::CloudRun, IMAGE); + assert!(yaml.contains("secrets.CLOUD_RUN_SERVICE")); + } + + #[test] + fn cloud_run_yaml_references_region_secret() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::CloudRun, IMAGE); + assert!(yaml.contains("secrets.GCP_REGION")); + } + + #[test] + fn gke_yaml_contains_get_credentials_action() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::Gke, IMAGE); + 
assert!(yaml.contains("google-github-actions/get-gke-credentials@v2")); + } + + #[test] + fn gke_yaml_contains_kubectl_set_image() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::Gke, IMAGE); + assert!(yaml.contains("kubectl set image")); + } + + #[test] + fn gke_yaml_contains_rollout_status_wait() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::Gke, IMAGE); + assert!(yaml.contains("kubectl rollout status")); + assert!(yaml.contains("timeout=300s")); + } + + #[test] + fn gke_yaml_references_cluster_name() { + let yaml = render_gcp_deploy_yaml(&DeployTarget::Gke, IMAGE); + assert!(yaml.contains("secrets.GKE_CLUSTER_NAME")); + } + + // ── gcp_deploy_required_secrets ─────────────────────────────────── + + #[test] + fn cloud_run_requires_service_and_region() { + let secrets = gcp_deploy_required_secrets(&DeployTarget::CloudRun); + assert!(secrets.contains(&"CLOUD_RUN_SERVICE".to_string())); + assert!(secrets.contains(&"GCP_REGION".to_string())); + } + + #[test] + fn gke_requires_four_secrets() { + let secrets = gcp_deploy_required_secrets(&DeployTarget::Gke); + assert_eq!(secrets.len(), 4); + assert!(secrets.contains(&"GKE_CLUSTER_NAME".to_string())); + } +} diff --git a/src/generator/cd_generation/deploy_hetzner.rs b/src/generator/cd_generation/deploy_hetzner.rs new file mode 100644 index 00000000..f8c734a5 --- /dev/null +++ b/src/generator/cd_generation/deploy_hetzner.rs @@ -0,0 +1,317 @@ +//! CD-09 — Hetzner Deploy Step Generator +//! +//! Generates GitHub Actions YAML snippets for Hetzner deployment targets: +//! +//! | Target | Method | Steps | +//! |-------------|-------------------------------|-------------------------------------| +//! | VPS | SSH + Docker Compose | `ssh` → `docker pull` → `up -d` | +//! | HetznerK8s | kubectl via kubeconfig | `kubectl set image` | +//! | Coolify | Coolify API webhook | `curl` POST to webhook URL | +//! +//! VPS deployments use SSH to pull the latest image and restart services +//! 
on the remote host via `docker compose`. + +use super::context::DeployTarget; +use super::schema::{DeployStep, RollbackInfo}; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the deploy step for the given Hetzner target. +pub fn generate_hetzner_deploy(target: &DeployTarget, image_tag: &str) -> DeployStep { + match target { + DeployTarget::Vps => DeployStep { + strategy: "recreate".to_string(), + command: "ssh".to_string(), + args: vec![ + "${{ secrets.SSH_USER }}@${{ secrets.SSH_HOST }}".to_string(), + format!("'docker pull {image_tag} && docker compose up -d'"), + ], + target: target.clone(), + }, + DeployTarget::HetznerK8s => DeployStep { + strategy: "rolling".to_string(), + command: "kubectl set image".to_string(), + args: vec![ + "deployment/{{DEPLOYMENT_NAME}}".to_string(), + format!("app={image_tag}"), + "--namespace={{K8S_NAMESPACE}}".to_string(), + ], + target: target.clone(), + }, + DeployTarget::Coolify => DeployStep { + strategy: "rolling".to_string(), + command: "curl".to_string(), + args: vec![ + "-fsSL".to_string(), + "-X POST".to_string(), + "${{ secrets.COOLIFY_WEBHOOK }}".to_string(), + ], + target: target.clone(), + }, + other => DeployStep { + strategy: "recreate".to_string(), + command: format!("echo 'Unsupported Hetzner target: {other}'"), + args: vec![], + target: other.clone(), + }, + } +} + +/// Generates rollback info for the given Hetzner target. 
+pub fn hetzner_rollback_info(target: &DeployTarget) -> RollbackInfo { + match target { + DeployTarget::Vps => RollbackInfo { + strategy: "manual".to_string(), + command_hint: "ssh $SSH_USER@$SSH_HOST 'docker compose down && docker pull && docker compose up -d'".to_string(), + }, + DeployTarget::HetznerK8s => RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "kubectl rollout undo deployment/{{DEPLOYMENT_NAME}} -n {{K8S_NAMESPACE}}".to_string(), + }, + DeployTarget::Coolify => RollbackInfo { + strategy: "manual".to_string(), + command_hint: "Use the Coolify dashboard to rollback to a previous deployment".to_string(), + }, + _ => RollbackInfo { + strategy: "manual".to_string(), + command_hint: "Manually redeploy the previous version".to_string(), + }, + } +} + +/// Renders the VPS deploy step as a GitHub Actions YAML snippet. +pub fn render_vps_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Deploy to VPS via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ secrets.SSH_HOST }}}} << 'DEPLOY_EOF' + docker pull {image_tag} + cd /opt/app && docker compose up -d + DEPLOY_EOF\n" + ) +} + +/// Renders the Hetzner K8s deploy step as a GitHub Actions YAML snippet. +pub fn render_hetzner_k8s_deploy_yaml(image_tag: &str) -> String { + format!( + "\ + - name: Deploy to Hetzner Kubernetes + run: | + kubectl set image deployment/${{{{ secrets.DEPLOYMENT_NAME }}}} \\ + app={image_tag} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} + kubectl rollout status deployment/${{{{ secrets.DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout=300s\n" + ) +} + +/// Renders the Coolify deploy step as a GitHub Actions YAML snippet. +pub fn render_coolify_deploy_yaml() -> String { + "\ + - name: Trigger Coolify deployment + run: | + curl -fsSL -X POST \"${{ secrets.COOLIFY_WEBHOOK }}\"\n" + .to_string() +} + +/// Renders the deploy YAML snippet for any Hetzner target. 
+pub fn render_hetzner_deploy_yaml(target: &DeployTarget, image_tag: &str) -> String { + match target { + DeployTarget::Vps => render_vps_deploy_yaml(image_tag), + DeployTarget::HetznerK8s => render_hetzner_k8s_deploy_yaml(image_tag), + DeployTarget::Coolify => render_coolify_deploy_yaml(), + _ => format!(" - name: Deploy\n run: echo 'Unsupported Hetzner target'\n"), + } +} + +/// Returns secrets required for the Hetzner deploy target. +pub fn hetzner_deploy_required_secrets(target: &DeployTarget) -> Vec { + match target { + DeployTarget::Vps => vec![ + "SSH_USER".to_string(), + "SSH_HOST".to_string(), + ], + DeployTarget::HetznerK8s => vec![ + "DEPLOYMENT_NAME".to_string(), + "K8S_NAMESPACE".to_string(), + ], + DeployTarget::Coolify => vec![ + "COOLIFY_WEBHOOK".to_string(), + ], + _ => vec![], + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + const IMAGE: &str = "ghcr.io/user/app:sha123"; + + // ── generate_hetzner_deploy ─────────────────────────────────────── + + #[test] + fn vps_deploy_step_uses_ssh() { + let step = generate_hetzner_deploy(&DeployTarget::Vps, IMAGE); + assert_eq!(step.command, "ssh"); + } + + #[test] + fn vps_deploy_step_strategy_is_recreate() { + let step = generate_hetzner_deploy(&DeployTarget::Vps, IMAGE); + assert_eq!(step.strategy, "recreate"); + } + + #[test] + fn vps_deploy_step_contains_docker_pull() { + let step = generate_hetzner_deploy(&DeployTarget::Vps, IMAGE); + assert!(step.args.iter().any(|a| a.contains("docker pull"))); + } + + #[test] + fn vps_deploy_step_contains_compose_up() { + let step = generate_hetzner_deploy(&DeployTarget::Vps, IMAGE); + assert!(step.args.iter().any(|a| a.contains("docker compose up"))); + } + + #[test] + fn vps_deploy_step_target_preserved() { + let step = generate_hetzner_deploy(&DeployTarget::Vps, IMAGE); + assert_eq!(step.target, DeployTarget::Vps); + } + + #[test] + fn k8s_deploy_step_uses_kubectl() { + let 
step = generate_hetzner_deploy(&DeployTarget::HetznerK8s, IMAGE); + assert!(step.command.contains("kubectl")); + } + + #[test] + fn k8s_deploy_step_strategy_is_rolling() { + let step = generate_hetzner_deploy(&DeployTarget::HetznerK8s, IMAGE); + assert_eq!(step.strategy, "rolling"); + } + + #[test] + fn k8s_deploy_step_contains_deployment_placeholder() { + let step = generate_hetzner_deploy(&DeployTarget::HetznerK8s, IMAGE); + assert!(step.args.iter().any(|a| a.contains("{{DEPLOYMENT_NAME}}"))); + } + + #[test] + fn coolify_deploy_step_uses_curl() { + let step = generate_hetzner_deploy(&DeployTarget::Coolify, IMAGE); + assert_eq!(step.command, "curl"); + } + + #[test] + fn coolify_deploy_step_contains_webhook_ref() { + let step = generate_hetzner_deploy(&DeployTarget::Coolify, IMAGE); + assert!(step.args.iter().any(|a| a.contains("COOLIFY_WEBHOOK"))); + } + + // ── hetzner_rollback_info ───────────────────────────────────────── + + #[test] + fn vps_rollback_is_manual() { + let info = hetzner_rollback_info(&DeployTarget::Vps); + assert_eq!(info.strategy, "manual"); + } + + #[test] + fn vps_rollback_mentions_docker_compose() { + let info = hetzner_rollback_info(&DeployTarget::Vps); + assert!(info.command_hint.contains("docker compose")); + } + + #[test] + fn k8s_rollback_uses_rollout_undo() { + let info = hetzner_rollback_info(&DeployTarget::HetznerK8s); + assert_eq!(info.strategy, "rollout-undo"); + assert!(info.command_hint.contains("rollout undo")); + } + + #[test] + fn coolify_rollback_references_dashboard() { + let info = hetzner_rollback_info(&DeployTarget::Coolify); + assert!(info.command_hint.contains("Coolify dashboard")); + } + + // ── render_hetzner_deploy_yaml ──────────────────────────────────── + + #[test] + fn vps_yaml_contains_ssh_command() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::Vps, IMAGE); + assert!(yaml.contains("ssh")); + } + + #[test] + fn vps_yaml_contains_docker_pull() { + let yaml = 
render_hetzner_deploy_yaml(&DeployTarget::Vps, IMAGE); + assert!(yaml.contains("docker pull")); + } + + #[test] + fn vps_yaml_contains_docker_compose_up() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::Vps, IMAGE); + assert!(yaml.contains("docker compose up -d")); + } + + #[test] + fn vps_yaml_references_ssh_secrets() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::Vps, IMAGE); + assert!(yaml.contains("secrets.SSH_USER")); + assert!(yaml.contains("secrets.SSH_HOST")); + } + + #[test] + fn k8s_yaml_contains_kubectl_set_image() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::HetznerK8s, IMAGE); + assert!(yaml.contains("kubectl set image")); + } + + #[test] + fn k8s_yaml_contains_rollout_status() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::HetznerK8s, IMAGE); + assert!(yaml.contains("kubectl rollout status")); + } + + #[test] + fn coolify_yaml_contains_curl_post() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::Coolify, IMAGE); + assert!(yaml.contains("curl")); + assert!(yaml.contains("-X POST")); + } + + #[test] + fn coolify_yaml_references_webhook_secret() { + let yaml = render_hetzner_deploy_yaml(&DeployTarget::Coolify, IMAGE); + assert!(yaml.contains("secrets.COOLIFY_WEBHOOK")); + } + + // ── hetzner_deploy_required_secrets ─────────────────────────────── + + #[test] + fn vps_requires_ssh_user_and_host() { + let secrets = hetzner_deploy_required_secrets(&DeployTarget::Vps); + assert!(secrets.contains(&"SSH_USER".to_string())); + assert!(secrets.contains(&"SSH_HOST".to_string())); + } + + #[test] + fn k8s_requires_deployment_and_namespace() { + let secrets = hetzner_deploy_required_secrets(&DeployTarget::HetznerK8s); + assert!(secrets.contains(&"DEPLOYMENT_NAME".to_string())); + assert!(secrets.contains(&"K8S_NAMESPACE".to_string())); + } + + #[test] + fn coolify_requires_webhook() { + let secrets = hetzner_deploy_required_secrets(&DeployTarget::Coolify); + assert!(secrets.contains(&"COOLIFY_WEBHOOK".to_string())); 
+ } +} diff --git a/src/generator/cd_generation/dispatch.rs b/src/generator/cd_generation/dispatch.rs new file mode 100644 index 00000000..204565ce --- /dev/null +++ b/src/generator/cd_generation/dispatch.rs @@ -0,0 +1,292 @@ +//! CD-29 — Manual Dispatch Inputs +//! +//! Generates the `workflow_dispatch` block that lets operators trigger a deploy +//! manually from the GitHub Actions UI (or API). +//! +//! ```yaml +//! on: +//! workflow_dispatch: +//! inputs: +//! image_tag: +//! description: 'Image tag to deploy (leave empty for latest build)' +//! required: false +//! type: string +//! environment: +//! description: 'Target environment' +//! required: true +//! type: choice +//! options: +//! - development +//! - staging +//! - production +//! ``` + +/// A dispatch input definition. +#[derive(Debug, Clone, PartialEq)] +pub struct DispatchInput { + pub name: String, + pub description: String, + pub required: bool, + pub input_type: DispatchInputType, +} + +/// Type discriminator for dispatch inputs. +#[derive(Debug, Clone, PartialEq)] +pub enum DispatchInputType { + /// Free-form string input. + StringInput { default: Option }, + /// Constrained choice input. + Choice { options: Vec }, + /// Boolean toggle. + Boolean { default: bool }, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the standard set of dispatch inputs for a CD workflow. +/// +/// Returns the `image_tag` (optional string) and `environment` (required choice) +/// inputs. Extra environments can be supplied; defaults to +/// `["development", "staging", "production"]`. 
+pub fn generate_dispatch_inputs(environments: &[String]) -> Vec { + let env_options = if environments.is_empty() { + vec![ + "development".to_string(), + "staging".to_string(), + "production".to_string(), + ] + } else { + environments.to_vec() + }; + + vec![ + DispatchInput { + name: "image_tag".to_string(), + description: "Image tag to deploy (leave empty for latest build)".to_string(), + required: false, + input_type: DispatchInputType::StringInput { default: None }, + }, + DispatchInput { + name: "environment".to_string(), + description: "Target environment".to_string(), + required: true, + input_type: DispatchInputType::Choice { + options: env_options, + }, + }, + DispatchInput { + name: "dry_run".to_string(), + description: "Perform a dry-run without deploying".to_string(), + required: false, + input_type: DispatchInputType::Boolean { default: false }, + }, + ] +} + +/// Renders the `workflow_dispatch:` block as YAML. +pub fn render_dispatch_yaml(inputs: &[DispatchInput]) -> String { + let mut yaml = String::new(); + yaml.push_str(" workflow_dispatch:\n"); + + if inputs.is_empty() { + return yaml; + } + + yaml.push_str(" inputs:\n"); + for input in inputs { + yaml.push_str(&format!(" {}:\n", input.name)); + yaml.push_str(&format!( + " description: '{}'\n", + input.description + )); + yaml.push_str(&format!( + " required: {}\n", + input.required + )); + + match &input.input_type { + DispatchInputType::StringInput { default } => { + yaml.push_str(" type: string\n"); + if let Some(d) = default { + yaml.push_str(&format!(" default: '{d}'\n")); + } + } + DispatchInputType::Choice { options } => { + yaml.push_str(" type: choice\n"); + yaml.push_str(" options:\n"); + for opt in options { + yaml.push_str(&format!(" - {opt}\n")); + } + } + DispatchInputType::Boolean { default } => { + yaml.push_str(" type: boolean\n"); + yaml.push_str(&format!(" default: {default}\n")); + } + } + } + + yaml +} + +/// Returns a GitHub Actions expression to read a dispatch input at 
runtime. +/// +/// e.g. `${{ github.event.inputs.image_tag }}` +pub fn dispatch_input_expression(input_name: &str) -> String { + format!("${{{{ github.event.inputs.{input_name} }}}}") +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn generate_default_inputs_count() { + let inputs = generate_dispatch_inputs(&[]); + assert_eq!(inputs.len(), 3); + } + + #[test] + fn generate_image_tag_input() { + let inputs = generate_dispatch_inputs(&[]); + let image_tag = &inputs[0]; + assert_eq!(image_tag.name, "image_tag"); + assert!(!image_tag.required); + assert!(matches!( + image_tag.input_type, + DispatchInputType::StringInput { default: None } + )); + } + + #[test] + fn generate_environment_input_defaults() { + let inputs = generate_dispatch_inputs(&[]); + let env = &inputs[1]; + assert_eq!(env.name, "environment"); + assert!(env.required); + if let DispatchInputType::Choice { options } = &env.input_type { + assert_eq!(options.len(), 3); + assert_eq!(options[0], "development"); + assert_eq!(options[1], "staging"); + assert_eq!(options[2], "production"); + } else { + panic!("Expected Choice type"); + } + } + + #[test] + fn generate_environment_custom_envs() { + let envs = vec!["dev".to_string(), "prod".to_string()]; + let inputs = generate_dispatch_inputs(&envs); + let env = &inputs[1]; + if let DispatchInputType::Choice { options } = &env.input_type { + assert_eq!(options.len(), 2); + assert_eq!(options[0], "dev"); + assert_eq!(options[1], "prod"); + } else { + panic!("Expected Choice type"); + } + } + + #[test] + fn generate_dry_run_input() { + let inputs = generate_dispatch_inputs(&[]); + let dry_run = &inputs[2]; + assert_eq!(dry_run.name, "dry_run"); + assert!(!dry_run.required); + assert!(matches!( + dry_run.input_type, + DispatchInputType::Boolean { default: false } + )); + } + + #[test] + fn yaml_contains_workflow_dispatch() { + let inputs = 
generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("workflow_dispatch:")); + } + + #[test] + fn yaml_contains_inputs_block() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("inputs:")); + } + + #[test] + fn yaml_image_tag_type_string() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("type: string")); + } + + #[test] + fn yaml_environment_type_choice() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("type: choice")); + } + + #[test] + fn yaml_choice_options_listed() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("- development")); + assert!(yaml.contains("- staging")); + assert!(yaml.contains("- production")); + } + + #[test] + fn yaml_boolean_input() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + assert!(yaml.contains("type: boolean")); + assert!(yaml.contains("default: false")); + } + + #[test] + fn yaml_empty_inputs() { + let yaml = render_dispatch_yaml(&[]); + assert!(yaml.contains("workflow_dispatch:")); + assert!(!yaml.contains("inputs:")); + } + + #[test] + fn dispatch_expression_format() { + let expr = dispatch_input_expression("image_tag"); + assert_eq!(expr, "${{ github.event.inputs.image_tag }}"); + } + + #[test] + fn dispatch_expression_environment() { + let expr = dispatch_input_expression("environment"); + assert_eq!(expr, "${{ github.event.inputs.environment }}"); + } + + #[test] + fn yaml_string_with_default() { + let inputs = vec![DispatchInput { + name: "version".to_string(), + description: "App version".to_string(), + required: false, + input_type: DispatchInputType::StringInput { + default: Some("latest".to_string()), + }, + }]; + let yaml = render_dispatch_yaml(&inputs); + 
assert!(yaml.contains("default: 'latest'")); + } + + #[test] + fn render_all_inputs_order() { + let inputs = generate_dispatch_inputs(&[]); + let yaml = render_dispatch_yaml(&inputs); + let image_pos = yaml.find("image_tag:").unwrap(); + let env_pos = yaml.find("environment:").unwrap(); + let dry_pos = yaml.find("dry_run:").unwrap(); + assert!(image_pos < env_pos); + assert!(env_pos < dry_pos); + } +} diff --git a/src/generator/cd_generation/environments.rs b/src/generator/cd_generation/environments.rs new file mode 100644 index 00000000..209fa2dd --- /dev/null +++ b/src/generator/cd_generation/environments.rs @@ -0,0 +1,278 @@ +//! CD-12 — Environment Strategy Module +//! +//! Generates multi-environment deployment strategy with `needs:` chains, +//! `if:` conditions based on branch filters, and GitHub Environment +//! references for approval gates. +//! +//! For a typical setup with staging + production: +//! +//! ```yaml +//! jobs: +//! deploy-staging: +//! environment: staging +//! if: github.ref == 'refs/heads/develop' +//! ... +//! +//! deploy-production: +//! environment: production +//! needs: deploy-staging +//! if: github.ref == 'refs/heads/main' +//! ... +//! ``` + +use super::schema::EnvironmentConfig; + +// ── Types ───────────────────────────────────────────────────────────────────── + +/// Represents a single environment job in the multi-env deploy chain. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EnvironmentJob { + /// Job id used in the YAML (e.g. `deploy-staging`). + pub job_id: String, + /// GitHub Environment name (e.g. `staging`). + pub environment_name: String, + /// The `if:` condition for this job (e.g. `github.ref == 'refs/heads/main'`). + pub condition: Option, + /// The `needs:` dependency — previous job id in the chain. + pub needs: Option, + /// Whether this environment requires manual approval (GitHub Environment protection rule). 
+ pub requires_approval: bool, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the ordered list of environment jobs from the pipeline's +/// environment configs. +/// +/// Each job depends on the previous one via `needs:`, creating a deploy chain. +pub fn generate_environment_jobs(environments: &[EnvironmentConfig]) -> Vec { + let mut jobs = Vec::with_capacity(environments.len()); + + for (i, env) in environments.iter().enumerate() { + let job_id = format!("deploy-{}", env.name); + let needs = if i > 0 { + Some(format!("deploy-{}", environments[i - 1].name)) + } else { + None + }; + let condition = env.branch_filter.as_ref().map(|branch| { + format!("github.ref == 'refs/heads/{branch}'") + }); + + jobs.push(EnvironmentJob { + job_id, + environment_name: env.name.clone(), + condition, + needs, + requires_approval: env.requires_approval, + }); + } + + jobs +} + +/// Renders the `jobs:` block header for a single environment job. +/// +/// This is a YAML snippet that goes at the start of each per-env deploy job. +pub fn render_environment_job_header(job: &EnvironmentJob) -> String { + let mut yaml = format!(" {}:\n", job.job_id); + yaml.push_str(" runs-on: ubuntu-latest\n"); + yaml.push_str(&format!( + " environment: {}\n", + job.environment_name + )); + + if let Some(ref needs) = job.needs { + yaml.push_str(&format!(" needs: {needs}\n")); + } + + if let Some(ref cond) = job.condition { + yaml.push_str(&format!(" if: {cond}\n")); + } + + yaml.push_str(" steps:\n"); + yaml +} + +/// Renders all environment job headers as a complete multi-job `jobs:` block. 
+pub fn render_environment_jobs_yaml(jobs: &[EnvironmentJob]) -> String { + let mut yaml = String::new(); + for job in jobs { + yaml.push_str(&render_environment_job_header(job)); + } + yaml +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::schema::EnvironmentConfig; + + fn sample_environments() -> Vec { + vec![ + EnvironmentConfig { + name: "staging".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: None, + replicas: Some(1), + }, + EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: true, + app_url: None, + namespace: None, + replicas: Some(2), + }, + ] + } + + fn three_environments() -> Vec { + vec![ + EnvironmentConfig { + name: "dev".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: Some("dev".to_string()), + replicas: Some(1), + }, + EnvironmentConfig { + name: "staging".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: Some("staging".to_string()), + replicas: Some(1), + }, + EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: true, + app_url: None, + namespace: Some("production".to_string()), + replicas: Some(2), + }, + ] + } + + #[test] + fn generates_correct_number_of_jobs() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + assert_eq!(jobs.len(), 2); + } + + #[test] + fn first_job_has_no_needs() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + assert!(jobs[0].needs.is_none()); + } + + #[test] + fn second_job_needs_first() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + 
assert_eq!(jobs[1].needs.as_deref(), Some("deploy-staging")); + } + + #[test] + fn job_id_uses_env_name() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + assert_eq!(jobs[0].job_id, "deploy-staging"); + assert_eq!(jobs[1].job_id, "deploy-production"); + } + + #[test] + fn branch_filter_becomes_condition() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + assert_eq!( + jobs[0].condition.as_deref(), + Some("github.ref == 'refs/heads/develop'") + ); + assert_eq!( + jobs[1].condition.as_deref(), + Some("github.ref == 'refs/heads/main'") + ); + } + + #[test] + fn production_requires_approval() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + assert!(!jobs[0].requires_approval); + assert!(jobs[1].requires_approval); + } + + #[test] + fn three_env_chain_has_correct_needs() { + let envs = three_environments(); + let jobs = generate_environment_jobs(&envs); + assert_eq!(jobs.len(), 3); + assert!(jobs[0].needs.is_none()); + assert_eq!(jobs[1].needs.as_deref(), Some("deploy-dev")); + assert_eq!(jobs[2].needs.as_deref(), Some("deploy-staging")); + } + + #[test] + fn no_condition_when_no_branch_filter() { + let envs = vec![EnvironmentConfig { + name: "custom".to_string(), + branch_filter: None, + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }]; + let jobs = generate_environment_jobs(&envs); + assert!(jobs[0].condition.is_none()); + } + + #[test] + fn render_header_contains_environment() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + let yaml = render_environment_job_header(&jobs[0]); + assert!(yaml.contains("environment: staging")); + assert!(yaml.contains("deploy-staging:")); + assert!(yaml.contains("runs-on: ubuntu-latest")); + } + + #[test] + fn render_header_contains_needs() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + let yaml = 
render_environment_job_header(&jobs[1]); + assert!(yaml.contains("needs: deploy-staging")); + } + + #[test] + fn render_header_contains_condition() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + let yaml = render_environment_job_header(&jobs[0]); + assert!(yaml.contains("if: github.ref == 'refs/heads/develop'")); + } + + #[test] + fn render_all_jobs_produces_both() { + let envs = sample_environments(); + let jobs = generate_environment_jobs(&envs); + let yaml = render_environment_jobs_yaml(&jobs); + assert!(yaml.contains("deploy-staging:")); + assert!(yaml.contains("deploy-production:")); + } + + #[test] + fn empty_environments_produces_empty_jobs() { + let jobs = generate_environment_jobs(&[]); + assert!(jobs.is_empty()); + } +} diff --git a/src/generator/cd_generation/health_check.rs b/src/generator/cd_generation/health_check.rs new file mode 100644 index 00000000..30a493e9 --- /dev/null +++ b/src/generator/cd_generation/health_check.rs @@ -0,0 +1,376 @@ +//! CD-11 — Post-Deploy Health Check Step Generator +//! +//! Generates a GitHub Actions YAML snippet that probes the deployed application +//! via `curl` with configurable retries. The health-check URL pattern depends +//! on the deploy target: +//! +//! | Target | URL Pattern | +//! |---------------|--------------------------------------------------------------| +//! | AppService | `https://{{APP_NAME}}.azurewebsites.net/{{HEALTH_PATH}}` | +//! | ContainerApps | `https://{{CONTAINER_APP_FQDN}}/{{HEALTH_PATH}}` | +//! | CloudRun | Uses Cloud Run service URL from previous step output | +//! | Aks / Gke / HetznerK8s | `kubectl rollout status` (no HTTP probe) | +//! | Vps | `https://{{SSH_HOST}}/{{HEALTH_PATH}}` | +//! | Coolify | `https://{{COOLIFY_DOMAIN}}/{{HEALTH_PATH}}` | + +use super::context::DeployTarget; +use super::schema::HealthCheckStep; + +/// Default health path when the caller doesn't provide one. 
+pub const DEFAULT_HEALTH_PATH: &str = "health"; + +/// Default retry count. +pub const DEFAULT_RETRIES: u32 = 5; + +/// Default interval between retries (seconds). +pub const DEFAULT_INTERVAL_SECS: u32 = 10; + +/// Default expected HTTP status code. +pub const DEFAULT_EXPECTED_STATUS: u16 = 200; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates a `HealthCheckStep` tailored to the given deploy target. +/// +/// `health_path` is the path component (without leading `/`) of the health +/// endpoint. Defaults to `"health"` when `None`. +pub fn generate_health_check( + target: &DeployTarget, + health_path: Option<&str>, +) -> HealthCheckStep { + let path = health_path.unwrap_or(DEFAULT_HEALTH_PATH); + let url = health_check_url(target, path); + + HealthCheckStep { + url, + retries: DEFAULT_RETRIES, + interval_secs: DEFAULT_INTERVAL_SECS, + expected_status: DEFAULT_EXPECTED_STATUS, + } +} + +/// Returns the probe URL template for the given target and path. +pub fn health_check_url(target: &DeployTarget, health_path: &str) -> String { + match target { + DeployTarget::AppService => { + format!("https://${{{{ secrets.AZURE_APP_NAME }}}}.azurewebsites.net/{health_path}") + } + DeployTarget::ContainerApps => { + format!("https://${{{{ secrets.CONTAINER_APP_FQDN }}}}/{health_path}") + } + DeployTarget::CloudRun => { + // Cloud Run URL comes from the deploy step output. + format!("${{{{ steps.deploy.outputs.url }}}}/{health_path}") + } + DeployTarget::Aks | DeployTarget::Gke | DeployTarget::HetznerK8s => { + // Kubernetes targets use kubectl rollout status — no HTTP URL needed. + "kubectl://rollout-status".to_string() + } + DeployTarget::Vps => { + format!("https://${{{{ secrets.SSH_HOST }}}}/{health_path}") + } + DeployTarget::Coolify => { + format!("https://${{{{ secrets.COOLIFY_DOMAIN }}}}/{health_path}") + } + } +} + +/// Returns `true` when the target uses `kubectl rollout status` instead of +/// an HTTP health probe. 
+pub fn is_kubectl_health_check(target: &DeployTarget) -> bool { + matches!( + target, + DeployTarget::Aks | DeployTarget::Gke | DeployTarget::HetznerK8s + ) +} + +/// Renders the health-check step as a GitHub Actions YAML snippet. +/// +/// For Kubernetes targets, uses `kubectl rollout status` with a timeout. +/// For all other targets, uses `curl --fail --retry`. +pub fn render_health_check_yaml(target: &DeployTarget, step: &HealthCheckStep) -> String { + if is_kubectl_health_check(target) { + render_kubectl_health_check_yaml(target, step) + } else { + render_curl_health_check_yaml(target, step) + } +} + +/// Returns the secrets referenced by the health-check step. +pub fn health_check_required_secrets(target: &DeployTarget) -> Vec { + match target { + DeployTarget::AppService => vec!["AZURE_APP_NAME".to_string()], + DeployTarget::ContainerApps => vec!["CONTAINER_APP_FQDN".to_string()], + DeployTarget::CloudRun => vec![], // URL from step output, no secret + DeployTarget::Aks | DeployTarget::Gke | DeployTarget::HetznerK8s => vec![], + DeployTarget::Vps => vec!["SSH_HOST".to_string()], + DeployTarget::Coolify => vec!["COOLIFY_DOMAIN".to_string()], + } +} + +// ── Private helpers ─────────────────────────────────────────────────────────── + +fn render_curl_health_check_yaml(target: &DeployTarget, step: &HealthCheckStep) -> String { + format!( + "\ + - name: Health check ({target}) + run: | + curl --fail \\ + --retry {retries} \\ + --retry-delay {interval} \\ + --retry-all-errors \\ + -o /dev/null -s -w '%{{http_code}}' \\ + {url} + env: + EXPECTED_STATUS: '{status}'\n", + target = target, + retries = step.retries, + interval = step.interval_secs, + url = step.url, + status = step.expected_status, + ) +} + +fn render_kubectl_health_check_yaml(target: &DeployTarget, step: &HealthCheckStep) -> String { + let timeout = step.retries * step.interval_secs; + format!( + "\ + - name: Health check ({target}) — rollout status + run: | + kubectl rollout status deployment/${{{{ 
secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout={timeout}s\n", + target = target, + timeout = timeout, + ) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_health_check ───────────────────────────────────────── + + #[test] + fn default_health_path_used_when_none() { + let step = generate_health_check(&DeployTarget::AppService, None); + assert!(step.url.contains("health")); + } + + #[test] + fn custom_health_path() { + let step = generate_health_check(&DeployTarget::AppService, Some("readyz")); + assert!(step.url.contains("readyz")); + } + + #[test] + fn default_retries() { + let step = generate_health_check(&DeployTarget::CloudRun, None); + assert_eq!(step.retries, DEFAULT_RETRIES); + } + + #[test] + fn default_interval() { + let step = generate_health_check(&DeployTarget::CloudRun, None); + assert_eq!(step.interval_secs, DEFAULT_INTERVAL_SECS); + } + + #[test] + fn default_expected_status() { + let step = generate_health_check(&DeployTarget::CloudRun, None); + assert_eq!(step.expected_status, DEFAULT_EXPECTED_STATUS); + } + + // ── health_check_url ────────────────────────────────────────────── + + #[test] + fn app_service_url_pattern() { + let url = health_check_url(&DeployTarget::AppService, "health"); + assert!(url.contains("azurewebsites.net/health")); + assert!(url.contains("AZURE_APP_NAME")); + } + + #[test] + fn container_apps_url_pattern() { + let url = health_check_url(&DeployTarget::ContainerApps, "health"); + assert!(url.contains("CONTAINER_APP_FQDN")); + } + + #[test] + fn cloud_run_url_uses_step_output() { + let url = health_check_url(&DeployTarget::CloudRun, "health"); + assert!(url.contains("steps.deploy.outputs.url")); + } + + #[test] + fn kubernetes_targets_return_kubectl_sentinel() { + for target in &[DeployTarget::Aks, DeployTarget::Gke, DeployTarget::HetznerK8s] { + let url = 
health_check_url(target, "health"); + assert_eq!(url, "kubectl://rollout-status"); + } + } + + #[test] + fn vps_url_pattern() { + let url = health_check_url(&DeployTarget::Vps, "status"); + assert!(url.contains("SSH_HOST")); + assert!(url.contains("status")); + } + + #[test] + fn coolify_url_pattern() { + let url = health_check_url(&DeployTarget::Coolify, "health"); + assert!(url.contains("COOLIFY_DOMAIN")); + } + + // ── is_kubectl_health_check ─────────────────────────────────────── + + #[test] + fn aks_is_kubectl() { + assert!(is_kubectl_health_check(&DeployTarget::Aks)); + } + + #[test] + fn gke_is_kubectl() { + assert!(is_kubectl_health_check(&DeployTarget::Gke)); + } + + #[test] + fn hetzner_k8s_is_kubectl() { + assert!(is_kubectl_health_check(&DeployTarget::HetznerK8s)); + } + + #[test] + fn app_service_is_not_kubectl() { + assert!(!is_kubectl_health_check(&DeployTarget::AppService)); + } + + #[test] + fn vps_is_not_kubectl() { + assert!(!is_kubectl_health_check(&DeployTarget::Vps)); + } + + #[test] + fn coolify_is_not_kubectl() { + assert!(!is_kubectl_health_check(&DeployTarget::Coolify)); + } + + #[test] + fn cloud_run_is_not_kubectl() { + assert!(!is_kubectl_health_check(&DeployTarget::CloudRun)); + } + + // ── render_health_check_yaml ────────────────────────────────────── + + #[test] + fn curl_yaml_for_app_service() { + let step = generate_health_check(&DeployTarget::AppService, None); + let yaml = render_health_check_yaml(&DeployTarget::AppService, &step); + assert!(yaml.contains("curl --fail")); + assert!(yaml.contains("--retry 5")); + } + + #[test] + fn curl_yaml_for_cloud_run() { + let step = generate_health_check(&DeployTarget::CloudRun, None); + let yaml = render_health_check_yaml(&DeployTarget::CloudRun, &step); + assert!(yaml.contains("curl --fail")); + assert!(yaml.contains("steps.deploy.outputs.url")); + } + + #[test] + fn kubectl_yaml_for_aks() { + let step = generate_health_check(&DeployTarget::Aks, None); + let yaml = 
render_health_check_yaml(&DeployTarget::Aks, &step); + assert!(yaml.contains("kubectl rollout status")); + assert!(yaml.contains("K8S_DEPLOYMENT_NAME")); + } + + #[test] + fn kubectl_yaml_timeout_calculated_from_retries() { + let step = generate_health_check(&DeployTarget::Gke, None); + let yaml = render_health_check_yaml(&DeployTarget::Gke, &step); + let expected_timeout = DEFAULT_RETRIES * DEFAULT_INTERVAL_SECS; + assert!(yaml.contains(&format!("--timeout={}s", expected_timeout))); + } + + #[test] + fn kubectl_yaml_references_namespace() { + let step = generate_health_check(&DeployTarget::HetznerK8s, None); + let yaml = render_health_check_yaml(&DeployTarget::HetznerK8s, &step); + assert!(yaml.contains("K8S_NAMESPACE")); + } + + #[test] + fn vps_curl_yaml() { + let step = generate_health_check(&DeployTarget::Vps, Some("ping")); + let yaml = render_health_check_yaml(&DeployTarget::Vps, &step); + assert!(yaml.contains("curl --fail")); + assert!(yaml.contains("SSH_HOST")); + assert!(yaml.contains("ping")); + } + + #[test] + fn coolify_curl_yaml() { + let step = generate_health_check(&DeployTarget::Coolify, None); + let yaml = render_health_check_yaml(&DeployTarget::Coolify, &step); + assert!(yaml.contains("curl --fail")); + assert!(yaml.contains("COOLIFY_DOMAIN")); + } + + #[test] + fn yaml_contains_step_name() { + let step = generate_health_check(&DeployTarget::AppService, None); + let yaml = render_health_check_yaml(&DeployTarget::AppService, &step); + assert!(yaml.contains("Health check")); + } + + #[test] + fn curl_yaml_includes_retry_delay() { + let step = generate_health_check(&DeployTarget::ContainerApps, None); + let yaml = render_health_check_yaml(&DeployTarget::ContainerApps, &step); + assert!(yaml.contains(&format!("--retry-delay {}", DEFAULT_INTERVAL_SECS))); + } + + // ── health_check_required_secrets ───────────────────────────────── + + #[test] + fn app_service_requires_app_name() { + let secrets = 
health_check_required_secrets(&DeployTarget::AppService); + assert!(secrets.contains(&"AZURE_APP_NAME".to_string())); + } + + #[test] + fn container_apps_requires_fqdn() { + let secrets = health_check_required_secrets(&DeployTarget::ContainerApps); + assert!(secrets.contains(&"CONTAINER_APP_FQDN".to_string())); + } + + #[test] + fn cloud_run_requires_no_secrets() { + let secrets = health_check_required_secrets(&DeployTarget::CloudRun); + assert!(secrets.is_empty()); + } + + #[test] + fn k8s_targets_require_no_secrets() { + for target in &[DeployTarget::Aks, DeployTarget::Gke, DeployTarget::HetznerK8s] { + let secrets = health_check_required_secrets(target); + assert!(secrets.is_empty(), "Unexpected secrets for {target}"); + } + } + + #[test] + fn vps_requires_ssh_host() { + let secrets = health_check_required_secrets(&DeployTarget::Vps); + assert!(secrets.contains(&"SSH_HOST".to_string())); + } + + #[test] + fn coolify_requires_domain() { + let secrets = health_check_required_secrets(&DeployTarget::Coolify); + assert!(secrets.contains(&"COOLIFY_DOMAIN".to_string())); + } +} diff --git a/src/generator/cd_generation/manifest.rs b/src/generator/cd_generation/manifest.rs new file mode 100644 index 00000000..0e68a80c --- /dev/null +++ b/src/generator/cd_generation/manifest.rs @@ -0,0 +1,258 @@ +//! CD Manifest Writer — CD-22 +//! +//! Serialises both the resolved and unresolved token inventories, plus +//! environment metadata, to `cd-manifest.toml`. +//! +//! The manifest file serves two purposes: +//! 1. **Agent fill phase** — the LLM agent reads `[unresolved]` entries +//! and patches them with real values. +//! 2. **Interactive prompts** — the wizard presents `[unresolved]` entries +//! to the human developer for manual input. 
+ +use std::collections::HashMap; +use std::path::Path; + +use serde::Serialize; + +use crate::error::{GeneratorError, IaCGeneratorError}; + +use super::schema::{EnvironmentConfig, UnresolvedToken}; +use super::token_resolver::ResolvedTokenMap; + +// ── Manifest structure ──────────────────────────────────────────────────────── + +/// A single unresolved token entry in the TOML manifest. +#[derive(Debug, Serialize)] +struct UnresolvedEntry { + #[serde(rename = "type")] + token_type: String, + hint: String, +} + +/// A single environment entry in the TOML manifest. +#[derive(Debug, Serialize)] +struct EnvironmentEntry { + requires_approval: bool, + #[serde(skip_serializing_if = "Option::is_none")] + branch_filter: Option, + #[serde(skip_serializing_if = "Option::is_none")] + app_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + namespace: Option, + #[serde(skip_serializing_if = "Option::is_none")] + replicas: Option, +} + +/// Top-level manifest structure serialised to TOML. +#[derive(Debug, Serialize)] +struct CdManifest { + resolved: HashMap, + unresolved: HashMap, + environments: HashMap, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Writes `cd-manifest.toml` containing resolved tokens, unresolved tokens, +/// and environment configuration. 
+pub fn write_cd_manifest( + resolved: &ResolvedTokenMap, + unresolved: &[UnresolvedToken], + environments: &[EnvironmentConfig], + dest: &Path, +) -> crate::Result<()> { + let manifest = CdManifest { + resolved: resolved.clone(), + unresolved: unresolved + .iter() + .map(|u| { + ( + u.name.clone(), + UnresolvedEntry { + token_type: u.token_type.clone(), + hint: u.hint.clone(), + }, + ) + }) + .collect(), + environments: environments + .iter() + .map(|e| { + ( + e.name.clone(), + EnvironmentEntry { + requires_approval: e.requires_approval, + branch_filter: e.branch_filter.clone(), + app_url: e.app_url.clone(), + namespace: e.namespace.clone(), + replicas: e.replicas, + }, + ) + }) + .collect(), + }; + + let content = toml::to_string_pretty(&manifest).map_err(|e| { + IaCGeneratorError::Generation(GeneratorError::InvalidContext(e.to_string())) + })?; + + std::fs::write(dest, content)?; + Ok(()) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn sample_resolved() -> ResolvedTokenMap { + let mut map = HashMap::new(); + map.insert("PROJECT_NAME".to_string(), "my-app".to_string()); + map.insert("IMAGE_NAME".to_string(), "my-app".to_string()); + map.insert("REGISTRY_URL".to_string(), "ghcr.io".to_string()); + map.insert("DEFAULT_BRANCH".to_string(), "main".to_string()); + map + } + + fn sample_unresolved() -> Vec { + vec![ + UnresolvedToken::new("APP_URL", "Public URL of your application", "url"), + UnresolvedToken::new("GCP_REGION", "GCP region for deployment", "string"), + ] + } + + fn sample_environments() -> Vec { + vec![ + EnvironmentConfig { + name: "staging".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: Some("staging".to_string()), + replicas: Some(1), + }, + EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: true, + 
app_url: Some("https://my-app.example.com".to_string()), + namespace: Some("prod".to_string()), + replicas: Some(3), + }, + ] + } + + #[test] + fn write_manifest_produces_valid_toml() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + write_cd_manifest(&sample_resolved(), &sample_unresolved(), &sample_environments(), &dest) + .expect("write_cd_manifest failed"); + + let content = std::fs::read_to_string(&dest).unwrap(); + // Should parse back as valid TOML. + let _: toml::Value = toml::from_str(&content).expect("output is valid TOML"); + } + + #[test] + fn manifest_contains_resolved_section() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + write_cd_manifest(&sample_resolved(), &sample_unresolved(), &sample_environments(), &dest) + .unwrap(); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("[resolved]")); + assert!(content.contains("PROJECT_NAME")); + assert!(content.contains("my-app")); + assert!(content.contains("ghcr.io")); + } + + #[test] + fn manifest_contains_unresolved_section() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + write_cd_manifest(&sample_resolved(), &sample_unresolved(), &sample_environments(), &dest) + .unwrap(); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("[unresolved.APP_URL]")); + assert!(content.contains("[unresolved.GCP_REGION]")); + assert!(content.contains("Public URL")); + assert!(content.contains(r#"type = "url""#)); + } + + #[test] + fn manifest_contains_environments_section() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + write_cd_manifest(&sample_resolved(), &sample_unresolved(), &sample_environments(), &dest) + .unwrap(); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("[environments.staging]") || 
content.contains("[environments.production]")); + assert!(content.contains("requires_approval = true")); + assert!(content.contains("replicas = 3")); + } + + #[test] + fn manifest_empty_unresolved() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + write_cd_manifest(&sample_resolved(), &[], &sample_environments(), &dest).unwrap(); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("[resolved]")); + // Unresolved section should be empty map. + assert!(content.contains("[unresolved]")); + } + + #[test] + fn manifest_single_environment_no_optional_fields() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("cd-manifest.toml"); + + let envs = vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: None, + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }]; + + write_cd_manifest(&sample_resolved(), &[], &envs, &dest).unwrap(); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("[environments.production]")); + assert!(content.contains("requires_approval = false")); + // Optional fields should not appear. + assert!(!content.contains("app_url")); + assert!(!content.contains("namespace")); + assert!(!content.contains("replicas")); + } + + #[test] + fn manifest_file_is_written_to_disk() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("subdir").join("cd-manifest.toml"); + + // Parent dir doesn't exist yet — write should handle this? + // Actually std::fs::write requires parent to exist. Let's create it. 
+ std::fs::create_dir_all(dest.parent().unwrap()).unwrap(); + + write_cd_manifest(&sample_resolved(), &sample_unresolved(), &sample_environments(), &dest) + .unwrap(); + + assert!(dest.exists()); + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(!content.is_empty()); + } +} diff --git a/src/generator/cd_generation/migration.rs b/src/generator/cd_generation/migration.rs new file mode 100644 index 00000000..11889236 --- /dev/null +++ b/src/generator/cd_generation/migration.rs @@ -0,0 +1,318 @@ +//! CD-10 — Database Migration Step Generator +//! +//! Generates GitHub Actions YAML snippets for running database migrations +//! before the deployment step. The migration tool is detected by context +//! collection (CD-02) and stored in `CdContext.migration_tool`. +//! +//! | Tool | Detection | Command | +//! |-------------------|-----------------------------|--------------------------------| +//! | Flyway | `flyway.conf` | `flyway migrate` | +//! | Liquibase | `liquibase.properties` | `liquibase update` | +//! | Alembic | `alembic.ini` | `alembic upgrade head` | +//! | Django | `manage.py` | `python manage.py migrate` | +//! | Prisma | `schema.prisma` | `npx prisma migrate deploy` | +//! | sqlx | `sqlx-data.json` / `.sqlx/` | `sqlx migrate run` | +//! | Diesel | `diesel.toml` | `diesel migration run` | +//! +//! For Hetzner VPS targets, the migration command is executed via SSH. + +use super::context::MigrationTool; +use super::schema::MigrationStep; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates a `MigrationStep` for the detected migration tool. +/// +/// Returns `None` when no migration tool was detected. +/// +/// The `via_ssh` flag is set when the target is a Hetzner VPS (migration +/// must run on the remote host rather than in the runner). 
+pub fn generate_migration_step( + tool: Option<&MigrationTool>, + via_ssh: bool, +) -> Option { + let tool = tool?; + let command = migration_command(tool); + + Some(MigrationStep { + tool: tool.clone(), + command, + via_ssh, + }) +} + +/// Returns the canonical migration command for the given tool. +pub fn migration_command(tool: &MigrationTool) -> String { + match tool { + MigrationTool::Flyway => "flyway migrate".to_string(), + MigrationTool::Liquibase => "liquibase update".to_string(), + MigrationTool::Alembic => "alembic upgrade head".to_string(), + MigrationTool::DjangoMigrations => "python manage.py migrate --noinput".to_string(), + MigrationTool::Prisma => "npx prisma migrate deploy".to_string(), + MigrationTool::Sqlx => "sqlx migrate run".to_string(), + MigrationTool::Diesel => "diesel migration run".to_string(), + } +} + +/// Renders the migration step as a GitHub Actions YAML snippet. +/// +/// When `via_ssh` is true, wraps the command in an SSH invocation. +pub fn render_migration_yaml(step: &MigrationStep) -> String { + if step.via_ssh { + format!( + "\ + - name: Run database migrations ({tool}) via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ secrets.SSH_HOST }}}} << 'MIGRATE_EOF' + cd /opt/app && {command} + MIGRATE_EOF + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n", + tool = step.tool, + command = step.command, + ) + } else { + format!( + "\ + - name: Run database migrations ({tool}) + run: {command} + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n", + tool = step.tool, + command = step.command, + ) + } +} + +/// Returns secrets required for the migration step. +pub fn migration_required_secrets(step: &MigrationStep) -> Vec { + let mut secrets = vec!["DATABASE_URL".to_string()]; + if step.via_ssh { + secrets.push("SSH_USER".to_string()); + secrets.push("SSH_HOST".to_string()); + } + secrets +} + +/// Renders secrets documentation for the migration step. 
+pub fn migration_secrets_doc(step: &MigrationStep) -> String { + let mut doc = format!( + "\ +### `DATABASE_URL` *(required)* + +Database connection string used by `{}` for running migrations. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**Format examples:** +- PostgreSQL: `postgresql://user:pass@host:5432/dbname` +- MySQL: `mysql://user:pass@host:3306/dbname` +- SQLite: `sqlite:./db.sqlite`\n", + step.tool + ); + + if step.via_ssh { + doc.push_str( + "\n\ +**Note:** This secret is passed as an environment variable to the SSH session. +Ensure the database is reachable from the VPS, not from the GitHub Actions runner.\n", + ); + } + + doc +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_migration_step ─────────────────────────────────────── + + #[test] + fn none_tool_returns_none() { + assert!(generate_migration_step(None, false).is_none()); + } + + #[test] + fn prisma_returns_some() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false); + assert!(step.is_some()); + } + + #[test] + fn prisma_command() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + assert_eq!(step.command, "npx prisma migrate deploy"); + } + + #[test] + fn diesel_command() { + let step = generate_migration_step(Some(&MigrationTool::Diesel), false).unwrap(); + assert_eq!(step.command, "diesel migration run"); + } + + #[test] + fn alembic_command() { + let step = generate_migration_step(Some(&MigrationTool::Alembic), false).unwrap(); + assert_eq!(step.command, "alembic upgrade head"); + } + + #[test] + fn django_command_has_noinput() { + let step = generate_migration_step(Some(&MigrationTool::DjangoMigrations), false).unwrap(); + assert!(step.command.contains("--noinput")); + } + + #[test] + fn flyway_command() { + let step = generate_migration_step(Some(&MigrationTool::Flyway), false).unwrap(); + 
assert_eq!(step.command, "flyway migrate"); + } + + #[test] + fn liquibase_command() { + let step = generate_migration_step(Some(&MigrationTool::Liquibase), false).unwrap(); + assert_eq!(step.command, "liquibase update"); + } + + #[test] + fn sqlx_command() { + let step = generate_migration_step(Some(&MigrationTool::Sqlx), false).unwrap(); + assert_eq!(step.command, "sqlx migrate run"); + } + + #[test] + fn via_ssh_flag_preserved() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), true).unwrap(); + assert!(step.via_ssh); + } + + #[test] + fn not_via_ssh_by_default() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + assert!(!step.via_ssh); + } + + // ── migration_command ───────────────────────────────────────────── + + #[test] + fn all_tools_produce_nonempty_command() { + let tools = [ + MigrationTool::Flyway, + MigrationTool::Liquibase, + MigrationTool::Alembic, + MigrationTool::DjangoMigrations, + MigrationTool::Prisma, + MigrationTool::Sqlx, + MigrationTool::Diesel, + ]; + for tool in &tools { + let cmd = migration_command(tool); + assert!(!cmd.is_empty(), "Empty command for {tool}"); + } + } + + // ── render_migration_yaml ───────────────────────────────────────── + + #[test] + fn local_yaml_contains_run_command() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("npx prisma migrate deploy")); + } + + #[test] + fn local_yaml_references_database_url() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("secrets.DATABASE_URL")); + } + + #[test] + fn local_yaml_does_not_contain_ssh() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(!yaml.contains("ssh")); + } + + #[test] + fn ssh_yaml_contains_ssh_command() { + let step 
= generate_migration_step(Some(&MigrationTool::Alembic), true).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("ssh")); + } + + #[test] + fn ssh_yaml_references_ssh_secrets() { + let step = generate_migration_step(Some(&MigrationTool::Alembic), true).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("secrets.SSH_USER")); + assert!(yaml.contains("secrets.SSH_HOST")); + } + + #[test] + fn ssh_yaml_contains_migration_command() { + let step = generate_migration_step(Some(&MigrationTool::Alembic), true).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("alembic upgrade head")); + } + + #[test] + fn yaml_step_name_contains_tool_name() { + let step = generate_migration_step(Some(&MigrationTool::Diesel), false).unwrap(); + let yaml = render_migration_yaml(&step); + assert!(yaml.contains("diesel")); + } + + // ── migration_required_secrets ──────────────────────────────────── + + #[test] + fn local_requires_database_url() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + let secrets = migration_required_secrets(&step); + assert!(secrets.contains(&"DATABASE_URL".to_string())); + assert_eq!(secrets.len(), 1); + } + + #[test] + fn ssh_requires_database_url_and_ssh_secrets() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), true).unwrap(); + let secrets = migration_required_secrets(&step); + assert!(secrets.contains(&"DATABASE_URL".to_string())); + assert!(secrets.contains(&"SSH_USER".to_string())); + assert!(secrets.contains(&"SSH_HOST".to_string())); + assert_eq!(secrets.len(), 3); + } + + // ── migration_secrets_doc ───────────────────────────────────────── + + #[test] + fn secrets_doc_mentions_database_url() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), false).unwrap(); + let doc = migration_secrets_doc(&step); + assert!(doc.contains("DATABASE_URL")); + } + + #[test] + fn secrets_doc_mentions_tool_name() { + let 
step = generate_migration_step(Some(&MigrationTool::Diesel), false).unwrap(); + let doc = migration_secrets_doc(&step); + assert!(doc.contains("diesel")); + } + + #[test] + fn ssh_secrets_doc_mentions_vpn_note() { + let step = generate_migration_step(Some(&MigrationTool::Prisma), true).unwrap(); + let doc = migration_secrets_doc(&step); + assert!(doc.contains("VPS")); + } + + #[test] + fn secrets_doc_contains_format_examples() { + let step = generate_migration_step(Some(&MigrationTool::Sqlx), false).unwrap(); + let doc = migration_secrets_doc(&step); + assert!(doc.contains("postgresql://")); + } +} diff --git a/src/generator/cd_generation/mod.rs b/src/generator/cd_generation/mod.rs new file mode 100644 index 00000000..f7cfbbce --- /dev/null +++ b/src/generator/cd_generation/mod.rs @@ -0,0 +1,61 @@ +//! CD Pipeline Generation Module +//! +//! Generates CD (Continuous Deployment) pipeline skeletons from project +//! analysis. Mirrors the CI generation architecture: context collection → +//! schema assembly → template rendering → file writing. +//! +//! ## Submodules +//! +//! - `context` — `CdContext` struct and context collector (CD-02) +//! - `schema` — Platform-agnostic `CdPipeline` data model (CD-17) +//! - `token_resolver` — Two-pass placeholder token engine for CD (CD-15 adapted) +//! - `manifest` — `cd-manifest.toml` writer (CD-22) +//! - `registry` — Container registry login steps + image tag strategy (CD-03) +//! - `auth_azure` — Azure OIDC authentication step (CD-04) +//! - `auth_gcp` — GCP Workload Identity Federation auth step (CD-05) +//! - `auth_hetzner` — Hetzner SSH / kubeconfig auth step (CD-06) +//! - `deploy_azure` — Azure deploy steps: App Service, AKS, Container Apps (CD-07) +//! - `deploy_gcp` — GCP deploy steps: Cloud Run, GKE (CD-08) +//! - `deploy_hetzner` — Hetzner deploy steps: VPS, HetznerK8s, Coolify (CD-09) +//! - `migration` — Database migration step generator (CD-10) +//! - `health_check` — Post-deploy health check step (CD-11) +//! 
- `templates` — Full workflow YAML builders: Azure, GCP, Hetzner (CD-18/19/20) +//! - `writer` — CD file writer with conflict detection +//! - `environments` — Multi-environment job strategy (CD-12) +//! - `rollback` — Rollback script & YAML comments (CD-13) +//! - `reusable_workflow` — Reusable `_deploy-base.yml` with `workflow_call` (CD-14) +//! - `versioning` — Image tag versioning strategy (CD-15) +//! - `terraform_step` — Terraform init/plan/apply steps (CD-16) +//! - `notification` — Slack deployment notifications (CD-21) +//! - `dispatch` — Manual workflow_dispatch inputs (CD-29) +//! - `secrets_doc` — CD secrets inventory & Hetzner prerequisites (CD-27/28) +//! - `cd_config` — `.syncable.cd.toml` project-level config (CD-24) + +pub mod auth_azure; +pub mod auth_gcp; +pub mod auth_hetzner; +pub mod cd_config; +pub mod context; +pub mod deploy_azure; +pub mod deploy_gcp; +pub mod deploy_hetzner; +pub mod dispatch; +pub mod environments; +pub mod health_check; +pub mod manifest; +pub mod migration; +pub mod notification; +pub mod pipeline; +pub mod registry; +pub mod reusable_workflow; +pub mod rollback; +pub mod schema; +pub mod secrets_doc; +pub mod templates; +pub mod terraform_step; +pub mod token_resolver; +pub mod versioning; +pub mod writer; + +#[cfg(test)] +mod cd_tests; diff --git a/src/generator/cd_generation/notification.rs b/src/generator/cd_generation/notification.rs new file mode 100644 index 00000000..e2a88fbf --- /dev/null +++ b/src/generator/cd_generation/notification.rs @@ -0,0 +1,217 @@ +//! CD-21 — Deployment Notifications (Slack) +//! +//! Generates a Slack notification step that fires on success/failure of the +//! deploy job. Uses `slackapi/slack-github-action@v2` with a payload that +//! includes repo name, environment, branch, commit SHA, and status emoji. +//! +//! ```yaml +//! - name: Notify Slack +//! if: always() +//! uses: slackapi/slack-github-action@v2 +//! with: +//! webhook: ${{ secrets.SLACK_WEBHOOK_URL }} +//! 
payload: | +//! {"text":"āœ… *my-app* deployed to *production* …"} +//! ``` + +use super::schema::NotificationStep; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Build a `NotificationStep` from user preferences. +pub fn generate_notification_step( + webhook_secret_name: &str, + on_success: bool, + on_failure: bool, +) -> NotificationStep { + NotificationStep { + channel_type: "slack".to_string(), + webhook_secret: webhook_secret_name.to_string(), + on_success, + on_failure, + } +} + +/// Renders a Slack notification step as a GitHub Actions YAML snippet. +/// +/// The step uses `if: always()` so it fires regardless of prior step outcomes. +/// The payload JSON includes dynamic GitHub context expressions. +pub fn render_notification_yaml(step: &NotificationStep) -> String { + let condition = notification_condition(step.on_success, step.on_failure); + + let mut yaml = String::new(); + yaml.push_str(" - name: Notify Slack\n"); + yaml.push_str(&format!(" if: {condition}\n")); + yaml.push_str(" uses: slackapi/slack-github-action@v2\n"); + yaml.push_str(" with:\n"); + yaml.push_str(&format!( + " webhook: ${{{{ secrets.{} }}}}\n", + step.webhook_secret + )); + yaml.push_str(" webhook-type: incoming-webhook\n"); + yaml.push_str(" payload: |\n"); + yaml.push_str(" {\n"); + yaml.push_str(" \"text\": \"${{ job.status == 'success' && 'āœ…' || 'āŒ' }} *${{ github.repository }}* deploy to *${{ github.ref_name }}* — ${{ job.status }}\\nCommit: `${{ github.sha }}` by ${{ github.actor }}\"\n"); + yaml.push_str(" }\n"); + + yaml +} + +/// Returns a short summary of the notification configuration. 
+pub fn notification_summary(step: &NotificationStep) -> String { + let events: Vec<&str> = [ + step.on_success.then_some("success"), + step.on_failure.then_some("failure"), + ] + .into_iter() + .flatten() + .collect(); + + format!( + "{} notification via {} on: {}", + step.channel_type, + step.webhook_secret, + events.join(", ") + ) +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Determine the `if:` condition based on success/failure flags. +fn notification_condition(on_success: bool, on_failure: bool) -> &'static str { + match (on_success, on_failure) { + (true, true) => "always()", + (true, false) => "success()", + (false, true) => "failure()", + // If neither flag is set we still emit, but guard with always() + (false, false) => "always()", + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn generate_step_sets_channel_type() { + let step = generate_notification_step("SLACK_WEBHOOK_URL", true, true); + assert_eq!(step.channel_type, "slack"); + } + + #[test] + fn generate_step_webhook_secret() { + let step = generate_notification_step("DEPLOY_SLACK_HOOK", true, false); + assert_eq!(step.webhook_secret, "DEPLOY_SLACK_HOOK"); + } + + #[test] + fn generate_step_on_success_flag() { + let step = generate_notification_step("HOOK", true, false); + assert!(step.on_success); + assert!(!step.on_failure); + } + + #[test] + fn generate_step_on_failure_flag() { + let step = generate_notification_step("HOOK", false, true); + assert!(!step.on_success); + assert!(step.on_failure); + } + + #[test] + fn yaml_contains_slack_action() { + let step = generate_notification_step("SLACK_WEBHOOK_URL", true, true); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("slackapi/slack-github-action@v2")); + } + + #[test] + fn yaml_always_condition_when_both() { + let step = generate_notification_step("HOOK", true, true); + let 
yaml = render_notification_yaml(&step); + assert!(yaml.contains("if: always()")); + } + + #[test] + fn yaml_success_condition_when_success_only() { + let step = generate_notification_step("HOOK", true, false); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("if: success()")); + } + + #[test] + fn yaml_failure_condition_when_failure_only() { + let step = generate_notification_step("HOOK", false, true); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("if: failure()")); + } + + #[test] + fn yaml_references_webhook_secret() { + let step = generate_notification_step("MY_SLACK_HOOK", true, true); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("secrets.MY_SLACK_HOOK")); + } + + #[test] + fn yaml_contains_payload() { + let step = generate_notification_step("HOOK", true, true); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("payload:")); + assert!(yaml.contains("github.repository")); + } + + #[test] + fn yaml_payload_includes_status_emoji() { + let step = generate_notification_step("HOOK", true, true); + let yaml = render_notification_yaml(&step); + assert!(yaml.contains("āœ…")); + assert!(yaml.contains("āŒ")); + } + + #[test] + fn summary_both_events() { + let step = generate_notification_step("HOOK", true, true); + let summary = notification_summary(&step); + assert!(summary.contains("success")); + assert!(summary.contains("failure")); + } + + #[test] + fn summary_success_only() { + let step = generate_notification_step("HOOK", true, false); + let summary = notification_summary(&step); + assert!(summary.contains("success")); + assert!(!summary.contains("failure")); + } + + #[test] + fn summary_failure_only() { + let step = generate_notification_step("HOOK", false, true); + let summary = notification_summary(&step); + assert!(!summary.contains("success")); + assert!(summary.contains("failure")); + } + + #[test] + fn condition_helper_both() { + assert_eq!(notification_condition(true, 
true), "always()"); + } + + #[test] + fn condition_helper_success() { + assert_eq!(notification_condition(true, false), "success()"); + } + + #[test] + fn condition_helper_failure() { + assert_eq!(notification_condition(false, true), "failure()"); + } + + #[test] + fn condition_helper_neither() { + assert_eq!(notification_condition(false, false), "always()"); + } +} diff --git a/src/generator/cd_generation/pipeline.rs b/src/generator/cd_generation/pipeline.rs new file mode 100644 index 00000000..01ee639e --- /dev/null +++ b/src/generator/cd_generation/pipeline.rs @@ -0,0 +1,454 @@ +//! CD Pipeline Builder +//! +//! Assembles a `CdPipeline` intermediate representation from a `CdContext`. +//! This mirrors the CI pattern: `collect_context → build_pipeline → resolve_tokens → render`. +//! +//! The builder calls platform-specific generators (auth, registry, deploy, +//! migration, health_check) and converts their outputs into schema types. + +use super::auth_azure; +use super::auth_gcp; +use super::auth_hetzner; +use super::context::{CdContext, CdPlatform, DeployTarget}; +use super::deploy_azure; +use super::deploy_gcp; +use super::deploy_hetzner; +use super::health_check; +use super::migration; +use super::notification; +use super::registry; +use super::schema::{ + CdPipeline, DockerBuildPushStep, EnvironmentConfig, +}; +use super::terraform_step; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Assembles a complete `CdPipeline` from the given project context. +/// +/// The resulting pipeline can be fed to `token_resolver::resolve_tokens` and +/// then to one of the template renderers (azure/gcp/hetzner). 
+pub fn build_cd_pipeline(ctx: &CdContext) -> CdPipeline { + // ── Auth step ───────────────────────────────────────────────────────── + let auth = match ctx.platform { + CdPlatform::Azure => { + let cfg = auth_azure::generate_azure_auth(); + auth_azure::to_auth_step(&cfg) + } + CdPlatform::Gcp => { + let cfg = auth_gcp::generate_gcp_auth(); + auth_gcp::to_auth_step(&cfg) + } + CdPlatform::Hetzner => { + let cfg = auth_hetzner::generate_hetzner_auth(&ctx.deploy_target); + auth_hetzner::to_auth_step(&cfg) + } + }; + + // ── Registry step ───────────────────────────────────────────────────── + let reg_cfg = registry::generate_registry_config(&ctx.registry); + let registry_step = registry::to_registry_step(®_cfg); + + // ── Image tag ───────────────────────────────────────────────────────── + let image_tag = registry::build_image_tag(®_cfg, &ctx.image_name); + + // ── Docker build+push step ──────────────────────────────────────────── + let docker_build_push = DockerBuildPushStep { + image_tag: image_tag.clone(), + context: ".".to_string(), + dockerfile: "Dockerfile".to_string(), + push: true, + buildx: true, + build_args: vec![], + }; + + // ── Deploy step ─────────────────────────────────────────────────────── + let deploy = match ctx.platform { + CdPlatform::Azure => { + deploy_azure::generate_azure_deploy(&ctx.deploy_target, &image_tag) + } + CdPlatform::Gcp => { + deploy_gcp::generate_gcp_deploy(&ctx.deploy_target, &image_tag) + } + CdPlatform::Hetzner => { + deploy_hetzner::generate_hetzner_deploy(&ctx.deploy_target, &image_tag) + } + }; + + // ── Rollback info ───────────────────────────────────────────────────── + let rollback_info = match ctx.platform { + CdPlatform::Azure => deploy_azure::azure_rollback_info(&ctx.deploy_target), + CdPlatform::Gcp => deploy_gcp::gcp_rollback_info(&ctx.deploy_target), + CdPlatform::Hetzner => deploy_hetzner::hetzner_rollback_info(&ctx.deploy_target), + }; + + // ── Migration step 
──────────────────────────────────────────────────── + let via_ssh = ctx.deploy_target == DeployTarget::Vps; + let migration_step = + migration::generate_migration_step(ctx.migration_tool.as_ref(), via_ssh); + let migration_step = migration_step.map(|mut s| { + if let Some(ref cmd) = ctx.migration_command_override { + s.command = cmd.clone(); + } + s + }); + + // ── Health check step ───────────────────────────────────────────────── + let health_check_step = health_check::generate_health_check( + &ctx.deploy_target, + ctx.health_check_path.as_deref(), + ); + + // ── Environment configs ─────────────────────────────────────────────── + let environments: Vec = ctx + .environments + .iter() + .map(|env| EnvironmentConfig { + name: env.name.clone(), + branch_filter: default_branch_filter(&env.name, &ctx.default_branch), + requires_approval: env.requires_approval, + app_url: None, + namespace: default_namespace(&env.name, &ctx.deploy_target), + replicas: default_replicas(&env.name), + }) + .collect(); + + // ── Terraform step (CD-16) ───────────────────────────────────────── + let terraform = if ctx.has_terraform { + let tf_dir = ctx + .terraform_dir + .as_ref() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|| "terraform".to_string()); + Some(terraform_step::generate_terraform_step(&tf_dir, false)) + } else { + None + }; + + // ── Notification step (CD-21) ──────────────────────────────────────── + let notifications = Some(notification::generate_notification_step( + "SLACK_WEBHOOK_URL", + true, + true, + )); + + CdPipeline { + project_name: ctx.project_name.clone(), + platform: ctx.platform.clone(), + deploy_target: ctx.deploy_target.clone(), + environments, + auth, + registry: registry_step, + docker_build_push, + migration: migration_step, + terraform, + deploy, + health_check: health_check_step, + rollback_info, + notifications, + unresolved_tokens: vec![], + default_branch: ctx.default_branch.clone(), + image_name: ctx.image_name.clone(), + } +} + 
+// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Returns a branch filter for common environment names. +fn default_branch_filter(env_name: &str, default_branch: &str) -> Option { + match env_name { + "production" | "prod" => Some(default_branch.to_string()), + "staging" | "stage" => Some("develop".to_string()), + "dev" | "development" => Some("develop".to_string()), + _ => None, + } +} + +/// Returns a Kubernetes namespace when the target is a k8s-based target. +fn default_namespace(env_name: &str, target: &DeployTarget) -> Option { + match target { + DeployTarget::Aks | DeployTarget::Gke | DeployTarget::HetznerK8s => { + Some(env_name.to_string()) + } + _ => None, + } +} + +/// Returns default replica counts per environment. +fn default_replicas(env_name: &str) -> Option { + match env_name { + "production" | "prod" => Some(2), + "staging" | "stage" => Some(1), + "dev" | "development" => Some(1), + _ => None, + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::context::{ + CdPlatform, DeployTarget, Environment, Registry, + }; + use tempfile::TempDir; + + fn sample_context(platform: CdPlatform, target: DeployTarget) -> CdContext { + let tmp = TempDir::new().unwrap(); + let analysis = crate::analyzer::analyze_project(tmp.path()).unwrap(); + CdContext { + analysis, + project_name: "test-app".to_string(), + platform: platform.clone(), + deploy_target: target, + environments: vec![ + Environment { + name: "staging".to_string(), + requires_approval: false, + }, + Environment { + name: "production".to_string(), + requires_approval: true, + }, + ], + registry: match platform { + CdPlatform::Azure => Registry::Acr, + CdPlatform::Gcp => Registry::Gar, + CdPlatform::Hetzner => Registry::Ghcr, + }, + image_name: "test-app".to_string(), + has_terraform: false, + terraform_dir: None, + has_k8s_manifests: false, + 
k8s_manifest_dir: None, + has_helm_chart: false, + helm_chart_dir: None, + migration_tool: None, + migration_command_override: None, + health_check_path: Some("/health".to_string()), + default_branch: "main".to_string(), + has_dockerfile: true, + } + } + + // ── Azure ───────────────────────────────────────────────────────────── + + #[test] + fn azure_app_service_pipeline_has_oidc_auth() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.auth.method, "oidc"); + assert!(pipeline.auth.action.as_deref() == Some("azure/login@v2")); + } + + #[test] + fn azure_pipeline_uses_acr_registry() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.registry.registry, Registry::Acr); + } + + #[test] + fn azure_aks_deploy_step_uses_k8s_deploy_action() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::Aks); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.deploy.command, "azure/k8s-deploy@v5"); + } + + #[test] + fn azure_pipeline_has_two_environments() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.environments.len(), 2); + assert_eq!(pipeline.environments[0].name, "staging"); + assert_eq!(pipeline.environments[1].name, "production"); + } + + #[test] + fn production_env_has_branch_filter_main() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + let prod = &pipeline.environments[1]; + assert_eq!(prod.branch_filter.as_deref(), Some("main")); + assert!(prod.requires_approval); + } + + #[test] + fn aks_environments_have_namespace() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::Aks); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!( + pipeline.environments[0].namespace.as_deref(), + Some("staging") + ); + 
assert_eq!( + pipeline.environments[1].namespace.as_deref(), + Some("production") + ); + } + + // ── GCP ─────────────────────────────────────────────────────────────── + + #[test] + fn gcp_cloud_run_pipeline_has_wif_auth() { + let ctx = sample_context(CdPlatform::Gcp, DeployTarget::CloudRun); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.auth.method, "workload-identity"); + } + + #[test] + fn gcp_pipeline_uses_gar_registry() { + let ctx = sample_context(CdPlatform::Gcp, DeployTarget::CloudRun); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.registry.registry, Registry::Gar); + } + + #[test] + fn gcp_gke_deploy_uses_kubectl() { + let ctx = sample_context(CdPlatform::Gcp, DeployTarget::Gke); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.deploy.command.contains("kubectl")); + } + + // ── Hetzner ─────────────────────────────────────────────────────────── + + #[test] + fn hetzner_vps_pipeline_has_ssh_auth() { + let ctx = sample_context(CdPlatform::Hetzner, DeployTarget::Vps); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.auth.method, "ssh"); + } + + #[test] + fn hetzner_pipeline_uses_ghcr() { + let ctx = sample_context(CdPlatform::Hetzner, DeployTarget::Vps); + let pipeline = build_cd_pipeline(&ctx); + assert_eq!(pipeline.registry.registry, Registry::Ghcr); + } + + #[test] + fn hetzner_vps_deploy_uses_ssh() { + let ctx = sample_context(CdPlatform::Hetzner, DeployTarget::Vps); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.deploy.command.contains("ssh")); + } + + // ── Migration ───────────────────────────────────────────────────────── + + #[test] + fn pipeline_without_migration_tool_has_no_migration_step() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.is_none()); + } + + #[test] + fn pipeline_with_migration_tool_has_migration_step() { + use 
crate::generator::cd_generation::context::MigrationTool; + let mut ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + ctx.migration_tool = Some(MigrationTool::Prisma); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.is_some()); + assert!(pipeline.migration.unwrap().command.contains("prisma")); + } + + #[test] + fn hetzner_vps_migration_is_via_ssh() { + use crate::generator::cd_generation::context::MigrationTool; + let mut ctx = sample_context(CdPlatform::Hetzner, DeployTarget::Vps); + ctx.migration_tool = Some(MigrationTool::Alembic); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.as_ref().unwrap().via_ssh); + } + + #[test] + fn migration_command_override_replaces_tool_default() { + use crate::generator::cd_generation::context::MigrationTool; + let mut ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + ctx.migration_tool = Some(MigrationTool::Prisma); + ctx.migration_command_override = Some("npx prisma migrate deploy --schema=custom/schema.prisma".to_string()); + let pipeline = build_cd_pipeline(&ctx); + let step = pipeline.migration.expect("migration step should be present"); + assert_eq!( + step.command, + "npx prisma migrate deploy --schema=custom/schema.prisma" + ); + } + + #[test] + fn migration_command_override_without_tool_produces_no_step() { + let mut ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + ctx.migration_command_override = Some("custom-migrate".to_string()); + // No migration_tool → generate_migration_step returns None, override is not applied + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.migration.is_none()); + } + + // ── Docker build ────────────────────────────────────────────────────── + + #[test] + fn docker_build_push_defaults_to_buildx() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.docker_build_push.buildx); + 
assert!(pipeline.docker_build_push.push); + assert_eq!(pipeline.docker_build_push.context, "."); + } + + #[test] + fn image_tag_contains_registry_and_image_name() { + let ctx = sample_context(CdPlatform::Hetzner, DeployTarget::Vps); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.docker_build_push.image_tag.contains("ghcr.io")); + assert!(pipeline.docker_build_push.image_tag.contains("test-app")); + } + + // ── Health check ────────────────────────────────────────────────────── + + #[test] + fn health_check_uses_detected_path() { + let ctx = sample_context(CdPlatform::Azure, DeployTarget::AppService); + let pipeline = build_cd_pipeline(&ctx); + assert!(pipeline.health_check.url.contains("/health")); + } + + // ── Helpers ─────────────────────────────────────────────────────────── + + #[test] + fn default_branch_filter_production_uses_main() { + assert_eq!( + default_branch_filter("production", "main"), + Some("main".to_string()) + ); + } + + #[test] + fn default_branch_filter_staging_uses_develop() { + assert_eq!( + default_branch_filter("staging", "main"), + Some("develop".to_string()) + ); + } + + #[test] + fn default_branch_filter_unknown_returns_none() { + assert_eq!(default_branch_filter("custom-env", "main"), None); + } + + #[test] + fn default_namespace_for_k8s_targets() { + assert_eq!( + default_namespace("staging", &DeployTarget::Aks), + Some("staging".to_string()) + ); + assert_eq!(default_namespace("prod", &DeployTarget::AppService), None); + } + + #[test] + fn default_replicas_production_is_two() { + assert_eq!(default_replicas("production"), Some(2)); + assert_eq!(default_replicas("staging"), Some(1)); + assert_eq!(default_replicas("custom"), None); + } +} diff --git a/src/generator/cd_generation/registry.rs b/src/generator/cd_generation/registry.rs new file mode 100644 index 00000000..71116352 --- /dev/null +++ b/src/generator/cd_generation/registry.rs @@ -0,0 +1,446 @@ +//! CD-03 — Registry Config Module +//! +//! 
Generates GitHub Actions YAML snippets for container registry login and +//! image tag construction. Supports ACR, GAR, GHCR, and custom registries. +//! +//! Each function returns a ready-to-embed YAML step snippet string. Template +//! builders (Session 4) will compose these snippets into full workflow files. +//! +//! ## Image tag strategy +//! +//! All CD images are tagged with the git SHA for immutability: +//! `/:${{ github.sha }}` +//! +//! The registry URL is either deterministic (e.g. `ghcr.io`) or a +//! `{{PLACEHOLDER}}` token resolved by the token engine. + +use super::context::Registry; +use super::schema::RegistryStep; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Resolved registry configuration ready for YAML rendering. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RegistryConfig { + /// Registry type. + pub registry: Registry, + /// Login action (GitHub Actions `uses:` reference) or `None` for shell-based login. + pub login_action: Option, + /// Full registry URL or placeholder. + pub registry_url: String, + /// Secrets required for login. + pub required_secrets: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Builds a `RegistryConfig` for the given registry type. 
+pub fn generate_registry_config(registry: &Registry) -> RegistryConfig { + match registry { + Registry::Ghcr => RegistryConfig { + registry: Registry::Ghcr, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "ghcr.io".to_string(), + required_secrets: vec![], + }, + Registry::Acr => RegistryConfig { + registry: Registry::Acr, + login_action: Some("azure/docker-login@v2".to_string()), + registry_url: "{{ACR_LOGIN_SERVER}}".to_string(), + required_secrets: vec![ + "ACR_LOGIN_SERVER".to_string(), + ], + }, + Registry::Gar => RegistryConfig { + registry: Registry::Gar, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "{{GAR_LOCATION}}-docker.pkg.dev".to_string(), + required_secrets: vec![ + "GAR_LOCATION".to_string(), + "GCP_PROJECT_ID".to_string(), + ], + }, + Registry::Custom(url) => RegistryConfig { + registry: Registry::Custom(url.clone()), + login_action: Some("docker/login-action@v3".to_string()), + registry_url: url.clone(), + required_secrets: vec![ + "REGISTRY_USERNAME".to_string(), + "REGISTRY_PASSWORD".to_string(), + ], + }, + } +} + +/// Converts a `RegistryConfig` into the schema `RegistryStep` for pipeline assembly. +pub fn to_registry_step(config: &RegistryConfig) -> RegistryStep { + RegistryStep { + registry: config.registry.clone(), + login_action: config.login_action.clone(), + registry_url: config.registry_url.clone(), + } +} + +/// Renders the registry login step as a GitHub Actions YAML snippet. 
+pub fn render_registry_login_yaml(config: &RegistryConfig) -> String { + match &config.registry { + Registry::Ghcr => format!( + "\ + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{{{ github.actor }}}} + password: ${{{{ secrets.GITHUB_TOKEN }}}}\n" + ), + Registry::Acr => format!( + "\ + - name: Log in to Azure Container Registry + uses: azure/docker-login@v2 + with: + login-server: ${{{{ secrets.ACR_LOGIN_SERVER }}}}\n" + ), + Registry::Gar => format!( + "\ + - name: Log in to Google Artifact Registry + uses: docker/login-action@v3 + with: + registry: ${{{{ secrets.GAR_LOCATION }}}}-docker.pkg.dev\n" + ), + Registry::Custom(url) => format!( + "\ + - name: Log in to container registry + uses: docker/login-action@v3 + with: + registry: {url} + username: ${{{{ secrets.REGISTRY_USERNAME }}}} + password: ${{{{ secrets.REGISTRY_PASSWORD }}}}\n" + ), + } +} + +/// Builds the full image tag string for CD pipelines. +/// +/// Format: `/:${{ github.sha }}` +pub fn build_image_tag(config: &RegistryConfig, image_name: &str) -> String { + format!( + "{}/{}:${{{{ github.sha }}}}", + config.registry_url, image_name + ) +} + +/// Builds the image tag for GAR which includes the project ID. +/// +/// Format: `-docker.pkg.dev///:${{ github.sha }}` +pub fn build_gar_image_tag(image_name: &str) -> String { + format!( + "{{{{GAR_LOCATION}}}}-docker.pkg.dev/{{{{GCP_PROJECT_ID}}}}/{image_name}/{image_name}:${{{{ github.sha }}}}" + ) +} + +/// Renders the Docker build and push steps as a GitHub Actions YAML snippet. 
+pub fn render_docker_build_push_yaml(image_tag: &str, dockerfile: &str, context: &str) -> String { + format!( + "\ + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: {context} + file: {dockerfile} + push: true + tags: {image_tag} + cache-from: type=gha + cache-to: type=gha,mode=max\n" + ) +} + +/// Returns secrets documentation entries for the registry. +pub fn registry_secrets_doc_entries(config: &RegistryConfig) -> String { + match &config.registry { + Registry::Ghcr => "\ +### `GITHUB_TOKEN` *(automatic)* + +Used to authenticate with GitHub Container Registry. Automatically provided by GitHub Actions.\n" + .to_string(), + Registry::Acr => "\ +### `ACR_LOGIN_SERVER` *(required)* + +Your Azure Container Registry login server URL, e.g. `myapp.azurecr.io`. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `az acr show --name --query loginServer -o tsv`\n" + .to_string(), + Registry::Gar => "\ +### `GAR_LOCATION` *(required)* + +Google Artifact Registry location, e.g. `us-central1`. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +--- + +### `GCP_PROJECT_ID` *(required)* + +Your Google Cloud project ID. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** `gcloud config get-value project`\n" + .to_string(), + Registry::Custom(url) => format!( + "\ +### `REGISTRY_USERNAME` *(required)* + +Username for authenticating with `{url}`. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +--- + +### `REGISTRY_PASSWORD` *(required)* + +Password or access token for authenticating with `{url}`. 
+ +**Where to set:** Repository → Settings → Secrets and variables → Actions\n" + ), + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_registry_config ────────────────────────────────────── + + #[test] + fn ghcr_config_has_deterministic_url() { + let config = generate_registry_config(&Registry::Ghcr); + assert_eq!(config.registry_url, "ghcr.io"); + } + + #[test] + fn ghcr_config_uses_docker_login_action() { + let config = generate_registry_config(&Registry::Ghcr); + assert_eq!(config.login_action.as_deref(), Some("docker/login-action@v3")); + } + + #[test] + fn ghcr_config_requires_no_extra_secrets() { + let config = generate_registry_config(&Registry::Ghcr); + assert!(config.required_secrets.is_empty()); + } + + #[test] + fn acr_config_has_placeholder_url() { + let config = generate_registry_config(&Registry::Acr); + assert!(config.registry_url.contains("{{ACR_LOGIN_SERVER}}")); + } + + #[test] + fn acr_config_uses_azure_docker_login() { + let config = generate_registry_config(&Registry::Acr); + assert_eq!(config.login_action.as_deref(), Some("azure/docker-login@v2")); + } + + #[test] + fn acr_config_requires_login_server_secret() { + let config = generate_registry_config(&Registry::Acr); + assert!(config.required_secrets.contains(&"ACR_LOGIN_SERVER".to_string())); + } + + #[test] + fn gar_config_has_placeholder_url() { + let config = generate_registry_config(&Registry::Gar); + assert!(config.registry_url.contains("{{GAR_LOCATION}}")); + } + + #[test] + fn gar_config_requires_location_and_project() { + let config = generate_registry_config(&Registry::Gar); + assert!(config.required_secrets.contains(&"GAR_LOCATION".to_string())); + assert!(config.required_secrets.contains(&"GCP_PROJECT_ID".to_string())); + } + + #[test] + fn custom_config_uses_provided_url() { + let config = generate_registry_config(&Registry::Custom("my.registry.io".to_string())); + 
assert_eq!(config.registry_url, "my.registry.io"); + } + + #[test] + fn custom_config_requires_username_and_password() { + let config = generate_registry_config(&Registry::Custom("my.registry.io".to_string())); + assert!(config.required_secrets.contains(&"REGISTRY_USERNAME".to_string())); + assert!(config.required_secrets.contains(&"REGISTRY_PASSWORD".to_string())); + } + + // ── to_registry_step ────────────────────────────────────────────── + + #[test] + fn to_registry_step_preserves_url() { + let config = generate_registry_config(&Registry::Ghcr); + let step = to_registry_step(&config); + assert_eq!(step.registry_url, "ghcr.io"); + assert_eq!(step.registry, Registry::Ghcr); + } + + // ── render_registry_login_yaml ──────────────────────────────────── + + #[test] + fn ghcr_yaml_references_github_token() { + let config = generate_registry_config(&Registry::Ghcr); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("secrets.GITHUB_TOKEN")); + } + + #[test] + fn ghcr_yaml_references_github_actor() { + let config = generate_registry_config(&Registry::Ghcr); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("github.actor")); + } + + #[test] + fn acr_yaml_references_login_server_secret() { + let config = generate_registry_config(&Registry::Acr); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("secrets.ACR_LOGIN_SERVER")); + } + + #[test] + fn gar_yaml_references_gar_location() { + let config = generate_registry_config(&Registry::Gar); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("secrets.GAR_LOCATION")); + } + + #[test] + fn custom_yaml_references_username_and_password() { + let config = generate_registry_config(&Registry::Custom("reg.io".to_string())); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("secrets.REGISTRY_USERNAME")); + assert!(yaml.contains("secrets.REGISTRY_PASSWORD")); + } + + #[test] + fn 
custom_yaml_contains_custom_registry_url() { + let config = generate_registry_config(&Registry::Custom("reg.io".to_string())); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("reg.io")); + } + + #[test] + fn all_login_yamls_contain_step_name() { + for reg in &[ + Registry::Ghcr, + Registry::Acr, + Registry::Gar, + Registry::Custom("x.io".to_string()), + ] { + let config = generate_registry_config(reg); + let yaml = render_registry_login_yaml(&config); + assert!(yaml.contains("- name:"), "Missing step name for {reg}"); + } + } + + // ── build_image_tag ─────────────────────────────────────────────── + + #[test] + fn image_tag_contains_registry_and_name() { + let config = generate_registry_config(&Registry::Ghcr); + let tag = build_image_tag(&config, "my-app"); + assert!(tag.starts_with("ghcr.io/my-app:")); + } + + #[test] + fn image_tag_contains_github_sha() { + let config = generate_registry_config(&Registry::Ghcr); + let tag = build_image_tag(&config, "my-app"); + assert!(tag.contains("github.sha")); + } + + #[test] + fn acr_image_tag_contains_placeholder() { + let config = generate_registry_config(&Registry::Acr); + let tag = build_image_tag(&config, "api"); + assert!(tag.contains("{{ACR_LOGIN_SERVER}}")); + } + + #[test] + fn gar_image_tag_contains_project_placeholders() { + let tag = build_gar_image_tag("api"); + assert!(tag.contains("{{GAR_LOCATION}}")); + assert!(tag.contains("{{GCP_PROJECT_ID}}")); + assert!(tag.contains("api")); + } + + // ── render_docker_build_push_yaml ───────────────────────────────── + + #[test] + fn docker_build_push_yaml_contains_buildx() { + let yaml = render_docker_build_push_yaml("ghcr.io/app:sha", "Dockerfile", "."); + assert!(yaml.contains("docker/setup-buildx-action@v3")); + } + + #[test] + fn docker_build_push_yaml_contains_build_push_action() { + let yaml = render_docker_build_push_yaml("ghcr.io/app:sha", "Dockerfile", "."); + assert!(yaml.contains("docker/build-push-action@v6")); + } + + #[test] + 
fn docker_build_push_yaml_sets_push_true() { + let yaml = render_docker_build_push_yaml("ghcr.io/app:sha", "Dockerfile", "."); + assert!(yaml.contains("push: true")); + } + + #[test] + fn docker_build_push_yaml_uses_gha_cache() { + let yaml = render_docker_build_push_yaml("ghcr.io/app:sha", "Dockerfile", "."); + assert!(yaml.contains("cache-from: type=gha")); + assert!(yaml.contains("cache-to: type=gha,mode=max")); + } + + #[test] + fn docker_build_push_yaml_includes_image_tag() { + let yaml = render_docker_build_push_yaml("ghcr.io/my-app:abc", "Dockerfile", "."); + assert!(yaml.contains("ghcr.io/my-app:abc")); + } + + // ── registry_secrets_doc_entries ─────────────────────────────────── + + #[test] + fn ghcr_secrets_doc_mentions_automatic() { + let config = generate_registry_config(&Registry::Ghcr); + let doc = registry_secrets_doc_entries(&config); + assert!(doc.contains("automatic")); + } + + #[test] + fn acr_secrets_doc_mentions_login_server() { + let config = generate_registry_config(&Registry::Acr); + let doc = registry_secrets_doc_entries(&config); + assert!(doc.contains("ACR_LOGIN_SERVER")); + } + + #[test] + fn gar_secrets_doc_mentions_location() { + let config = generate_registry_config(&Registry::Gar); + let doc = registry_secrets_doc_entries(&config); + assert!(doc.contains("GAR_LOCATION")); + assert!(doc.contains("GCP_PROJECT_ID")); + } + + #[test] + fn custom_secrets_doc_mentions_custom_url() { + let config = generate_registry_config(&Registry::Custom("reg.io".to_string())); + let doc = registry_secrets_doc_entries(&config); + assert!(doc.contains("reg.io")); + } +} diff --git a/src/generator/cd_generation/reusable_workflow.rs b/src/generator/cd_generation/reusable_workflow.rs new file mode 100644 index 00000000..fea0d8b1 --- /dev/null +++ b/src/generator/cd_generation/reusable_workflow.rs @@ -0,0 +1,310 @@ +//! CD-14 — Reusable Workflow Extraction +//! +//! For multi-environment setups, extracts common deploy logic into +//! 
`.github/workflows/_deploy-base.yml` as a `workflow_call` reusable workflow. +//! +//! Environment-specific workflows call this base with environment-specific inputs: +//! +//! ```yaml +//! jobs: +//! deploy: +//! uses: ./.github/workflows/_deploy-base.yml +//! with: +//! environment: production +//! image_tag: ${{ needs.build.outputs.image }} +//! secrets: inherit +//! ``` + +use super::context::{CdPlatform, DeployTarget}; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Returns the filename for the reusable workflow base file. +pub fn base_workflow_filename() -> &'static str { + "_deploy-base.yml" +} + +/// Generates the reusable `_deploy-base.yml` workflow content. +/// +/// This workflow declares `workflow_call` inputs for `environment` and +/// `image_tag`, then runs the common deploy steps. +pub fn render_reusable_base( + platform: &CdPlatform, + target: &DeployTarget, + project_name: &str, +) -> String { + let mut yaml = String::new(); + + // Header + yaml.push_str(&format!( + "# Auto-generated by sync-ctl — Reusable deploy base for {project_name}\n" + )); + yaml.push_str(&format!("# Platform: {platform:?} | Target: {target}\n")); + yaml.push_str("name: _deploy-base\n\n"); + + // Trigger — workflow_call only + yaml.push_str("on:\n"); + yaml.push_str(" workflow_call:\n"); + yaml.push_str(" inputs:\n"); + yaml.push_str(" environment:\n"); + yaml.push_str(" description: 'Deployment environment'\n"); + yaml.push_str(" required: true\n"); + yaml.push_str(" type: string\n"); + yaml.push_str(" image_tag:\n"); + yaml.push_str(" description: 'Docker image tag to deploy'\n"); + yaml.push_str(" required: true\n"); + yaml.push_str(" type: string\n"); + + // Permissions + yaml.push_str("\npermissions:\n"); + match platform { + CdPlatform::Azure | CdPlatform::Gcp => { + yaml.push_str(" id-token: write\n"); + yaml.push_str(" contents: read\n"); + } + CdPlatform::Hetzner => { + yaml.push_str(" packages: write\n"); + yaml.push_str(" 
contents: read\n"); + } + } + + // Job + yaml.push_str("\njobs:\n"); + yaml.push_str(" deploy:\n"); + yaml.push_str(" runs-on: ubuntu-latest\n"); + yaml.push_str(" environment: ${{ inputs.environment }}\n"); + yaml.push_str(" steps:\n"); + yaml.push_str(" - uses: actions/checkout@v4\n\n"); + yaml.push_str(&render_base_deploy_steps(platform, target)); + + yaml +} + +/// Generates a caller workflow snippet that invokes the base. +/// +/// One of these is emitted per environment (e.g., `deploy-staging.yml`). +pub fn render_caller_job( + environment_name: &str, + image_tag_expression: &str, + needs: Option<&str>, +) -> String { + let mut yaml = String::new(); + + yaml.push_str(&format!( + " deploy-{environment_name}:\n" + )); + yaml.push_str(&format!( + " uses: ./.github/workflows/{}\n", + base_workflow_filename() + )); + if let Some(dep) = needs { + yaml.push_str(&format!(" needs: {dep}\n")); + } + yaml.push_str(" with:\n"); + yaml.push_str(&format!( + " environment: {environment_name}\n" + )); + yaml.push_str(&format!( + " image_tag: {image_tag_expression}\n" + )); + yaml.push_str(" secrets: inherit\n"); + + yaml +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +fn render_base_deploy_steps(platform: &CdPlatform, target: &DeployTarget) -> String { + let mut yaml = String::new(); + + // Auth step placeholder + match platform { + CdPlatform::Azure => { + yaml.push_str(" - name: Azure login (OIDC)\n"); + yaml.push_str(" uses: azure/login@v2\n"); + yaml.push_str(" with:\n"); + yaml.push_str(" client-id: ${{ secrets.AZURE_CLIENT_ID }}\n"); + yaml.push_str(" tenant-id: ${{ secrets.AZURE_TENANT_ID }}\n"); + yaml.push_str(" subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}\n\n"); + } + CdPlatform::Gcp => { + yaml.push_str(" - name: Authenticate to GCP\n"); + yaml.push_str(" uses: google-github-actions/auth@v2\n"); + yaml.push_str(" with:\n"); + yaml.push_str(" workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER 
}}\n"); + yaml.push_str(" service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}\n\n"); + } + CdPlatform::Hetzner => { + yaml.push_str(" - name: Set up SSH agent\n"); + yaml.push_str(" uses: webfactory/ssh-agent@v0.9.0\n"); + yaml.push_str(" with:\n"); + yaml.push_str(" ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}\n\n"); + } + } + + // Deploy step placeholder + yaml.push_str(&format!( + " - name: Deploy (${{{{ inputs.environment }}}}) to {target}\n" + )); + yaml.push_str(" run: |\n"); + yaml.push_str(" echo \"Deploying ${{ inputs.image_tag }} to ${{ inputs.environment }}\"\n"); + yaml.push_str(&deploy_command_for_target(target)); + + yaml +} + +fn deploy_command_for_target(target: &DeployTarget) -> String { + match target { + DeployTarget::AppService => { + " # az webapp config container set --name $APP_NAME --image ${{ inputs.image_tag }}\n".to_string() + } + DeployTarget::Aks | DeployTarget::Gke | DeployTarget::HetznerK8s => { + " # kubectl set image deployment/$DEPLOYMENT_NAME app=${{ inputs.image_tag }}\n".to_string() + } + DeployTarget::ContainerApps => { + " # az containerapp update --name $APP_NAME --image ${{ inputs.image_tag }}\n".to_string() + } + DeployTarget::CloudRun => { + " # gcloud run deploy $SERVICE --image ${{ inputs.image_tag }}\n".to_string() + } + DeployTarget::Vps => { + " # ssh $SSH_USER@$SSH_HOST \"docker pull ${{ inputs.image_tag }} && docker compose up -d\"\n".to_string() + } + DeployTarget::Coolify => { + " # curl -X POST $COOLIFY_WEBHOOK_URL\n".to_string() + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn base_filename_is_underscore_prefixed() { + assert!(base_workflow_filename().starts_with('_')); + assert!(base_workflow_filename().ends_with(".yml")); + } + + #[test] + fn base_contains_workflow_call() { + let yaml = render_reusable_base( + &CdPlatform::Azure, + &DeployTarget::AppService, + "my-app", + ); + 
assert!(yaml.contains("workflow_call:")); + } + + #[test] + fn base_has_environment_input() { + let yaml = render_reusable_base( + &CdPlatform::Azure, + &DeployTarget::AppService, + "my-app", + ); + assert!(yaml.contains("environment:")); + assert!(yaml.contains("type: string")); + } + + #[test] + fn base_has_image_tag_input() { + let yaml = render_reusable_base( + &CdPlatform::Azure, + &DeployTarget::AppService, + "my-app", + ); + assert!(yaml.contains("image_tag:")); + } + + #[test] + fn azure_base_has_oidc_permissions() { + let yaml = render_reusable_base( + &CdPlatform::Azure, + &DeployTarget::AppService, + "my-app", + ); + assert!(yaml.contains("id-token: write")); + } + + #[test] + fn hetzner_base_has_packages_permission() { + let yaml = render_reusable_base( + &CdPlatform::Hetzner, + &DeployTarget::Vps, + "my-app", + ); + assert!(yaml.contains("packages: write")); + } + + #[test] + fn gcp_base_has_wif_auth() { + let yaml = render_reusable_base( + &CdPlatform::Gcp, + &DeployTarget::CloudRun, + "my-app", + ); + assert!(yaml.contains("google-github-actions/auth@v2")); + assert!(yaml.contains("workload_identity_provider")); + } + + #[test] + fn caller_job_uses_base_workflow() { + let yaml = render_caller_job("staging", "${{ needs.build.outputs.image }}", None); + assert!(yaml.contains("uses: ./.github/workflows/_deploy-base.yml")); + } + + #[test] + fn caller_job_passes_environment() { + let yaml = render_caller_job("production", "${{ env.IMAGE_TAG }}", None); + assert!(yaml.contains("environment: production")); + } + + #[test] + fn caller_job_passes_image_tag() { + let yaml = render_caller_job("staging", "${{ env.IMAGE_TAG }}", None); + assert!(yaml.contains("image_tag: ${{ env.IMAGE_TAG }}")); + } + + #[test] + fn caller_job_includes_needs_when_provided() { + let yaml = + render_caller_job("production", "${{ env.IMAGE_TAG }}", Some("deploy-staging")); + assert!(yaml.contains("needs: deploy-staging")); + } + + #[test] + fn caller_job_no_needs_when_none() { + 
let yaml = render_caller_job("dev", "${{ env.IMAGE_TAG }}", None); + assert!(!yaml.contains("needs:")); + } + + #[test] + fn caller_job_inherits_secrets() { + let yaml = render_caller_job("staging", "${{ env.IMAGE_TAG }}", None); + assert!(yaml.contains("secrets: inherit")); + } + + #[test] + fn base_contains_checkout() { + let yaml = render_reusable_base( + &CdPlatform::Azure, + &DeployTarget::AppService, + "my-app", + ); + assert!(yaml.contains("actions/checkout@v4")); + } + + #[test] + fn base_contains_deploy_step() { + let yaml = render_reusable_base( + &CdPlatform::Gcp, + &DeployTarget::CloudRun, + "my-app", + ); + assert!(yaml.contains("Deploy (")); + assert!(yaml.contains("inputs.image_tag")); + } +} diff --git a/src/generator/cd_generation/rollback.rs b/src/generator/cd_generation/rollback.rs new file mode 100644 index 00000000..e21503d1 --- /dev/null +++ b/src/generator/cd_generation/rollback.rs @@ -0,0 +1,318 @@ +//! CD-13 — Rollback Step & Comments +//! +//! Generates a `.syncable/scripts/rollback.sh` script and YAML comment +//! blocks for rollback instructions. The script is parameterized per +//! platform and deploy target. +//! +//! The script accepts an `IMAGE_TAG` argument and runs the platform-specific +//! rollback command. + +use super::context::{CdPlatform, DeployTarget}; +use super::schema::RollbackInfo; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates the contents of `.syncable/scripts/rollback.sh`. +/// +/// The script is a standalone Bash script that the user can run to rollback +/// to a specific image tag. It accepts the previous image tag as `$1`. 
+pub fn generate_rollback_script(
+    platform: &CdPlatform,
+    target: &DeployTarget,
+    rollback_info: &RollbackInfo,
+) -> String {
+    let mut script = String::new();
+
+    script.push_str("#!/usr/bin/env bash\n");
+    script.push_str("# Auto-generated by sync-ctl — Rollback script\n");
+    script.push_str(&format!(
+        "# Platform: {:?} | Target: {} | Strategy: {}\n",
+        platform, target, rollback_info.strategy
+    ));
+    script.push_str("#\n");
+    script.push_str("# Usage: ./rollback.sh <previous-image-tag>\n");
+    script.push_str("# e.g. ./rollback.sh ghcr.io/org/app:abc123\n");
+    script.push_str("\nset -euo pipefail\n\n");
+
+    script.push_str("PREV_TAG=\"${1:?Usage: ./rollback.sh <previous-image-tag>}\"\n\n");
+
+    script.push_str(&format!(
+        "echo \"šŸ”„ Rolling back to $PREV_TAG (strategy: {})\"\n\n",
+        rollback_info.strategy
+    ));
+
+    script.push_str(&rollback_commands(platform, target));
+
+    script.push_str("\necho \"āœ… Rollback complete\"\n");
+
+    script
+}
+
+/// Renders the YAML comment block for rollback that goes at the end of each
+/// deploy job. Enhanced version with script reference.
+pub fn render_rollback_yaml_comment(
+    rollback_info: &RollbackInfo,
+) -> String {
+    format!(
+        "\
+# ── Rollback ──────────────────────────────────────────────────
+# Strategy: {}
+# Command: {}
+#
+# Automated script: .syncable/scripts/rollback.sh
+",
+        rollback_info.strategy, rollback_info.command_hint
+    )
+}
+
+/// Returns the relative path for the rollback script.
+pub fn rollback_script_path() -> &'static str {
+    ".syncable/scripts/rollback.sh"
+}
+
+// ── Helpers ───────────────────────────────────────────────────────────────────
+
+fn rollback_commands(platform: &CdPlatform, target: &DeployTarget) -> String {
+    match (platform, target) {
+        (CdPlatform::Azure, DeployTarget::AppService) => {
+            "\
+# Azure App Service rollback
+az webapp config container set \\
+  --resource-group \"${RESOURCE_GROUP:?Set RESOURCE_GROUP}\" \\
+  --name \"${APP_NAME:?Set APP_NAME}\" \\
+  --container-image-name \"$PREV_TAG\"
+"
+            .to_string()
+        }
+        (CdPlatform::Azure, DeployTarget::Aks) => {
+            "\
+# AKS rollback via kubectl
+kubectl rollout undo deployment/\"${DEPLOYMENT_NAME:?Set DEPLOYMENT_NAME}\" \\
+  -n \"${K8S_NAMESPACE:-default}\"
+# Or deploy a specific image:
+# kubectl set image deployment/$DEPLOYMENT_NAME app=$PREV_TAG -n $K8S_NAMESPACE
+"
+            .to_string()
+        }
+        (CdPlatform::Azure, DeployTarget::ContainerApps) => {
+            "\
+# Azure Container Apps rollback
+az containerapp update \\
+  --name \"${APP_NAME:?Set APP_NAME}\" \\
+  --resource-group \"${RESOURCE_GROUP:?Set RESOURCE_GROUP}\" \\
+  --image \"$PREV_TAG\"
+"
+            .to_string()
+        }
+        (CdPlatform::Gcp, DeployTarget::CloudRun) => {
+            "\
+# Cloud Run rollback — shift traffic to previous revision
+gcloud run services update-traffic \"${CLOUD_RUN_SERVICE:?Set CLOUD_RUN_SERVICE}\" \\
+  --region=\"${GCP_REGION:?Set GCP_REGION}\" \\
+  --to-revisions=LATEST=0
+# Or deploy a specific image:
+# gcloud run deploy $CLOUD_RUN_SERVICE --image=$PREV_TAG --region=$GCP_REGION
+"
+            .to_string()
+        }
+        (CdPlatform::Gcp, DeployTarget::Gke) => {
+            "\
+# GKE rollback via kubectl
+kubectl rollout undo deployment/\"${DEPLOYMENT_NAME:?Set DEPLOYMENT_NAME}\" \\
+  -n \"${K8S_NAMESPACE:-default}\"
+"
+            .to_string()
+        }
+        (CdPlatform::Hetzner, DeployTarget::Vps) => {
+            "\
+# Hetzner VPS rollback via SSH
+# NOTE: unquoted heredoc delimiter so $PREV_TAG expands locally before being
+# sent to the remote host (a quoted 'EOF' would send the literal, unset name).
+ssh \"${SSH_USER:?Set SSH_USER}@${SSH_HOST:?Set SSH_HOST}\" << EOF
+  docker compose down
+  docker pull \"$PREV_TAG\"
+  
docker compose up -d +EOF +" + .to_string() + } + (CdPlatform::Hetzner, DeployTarget::HetznerK8s) => { + "\ +# Hetzner K8s rollback via kubectl +kubectl rollout undo deployment/\"${DEPLOYMENT_NAME:?Set DEPLOYMENT_NAME}\" \\ + -n \"${K8S_NAMESPACE:-default}\" +" + .to_string() + } + (CdPlatform::Hetzner, DeployTarget::Coolify) => { + "\ +# Coolify rollback — trigger webhook with previous tag +curl -X POST \"${COOLIFY_WEBHOOK_URL:?Set COOLIFY_WEBHOOK_URL}\" \\ + -H 'Content-Type: application/json' \\ + -d \"{\\\"image\\\": \\\"$PREV_TAG\\\"}\" +" + .to_string() + } + _ => { + "\ +# Manual rollback — redeploy the previous image tag +echo \"Redeploy $PREV_TAG using your platform's CLI or dashboard\" +" + .to_string() + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn script_has_shebang() { + let info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp ...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Azure, &DeployTarget::AppService, &info); + assert!(script.starts_with("#!/usr/bin/env bash")); + } + + #[test] + fn script_has_set_euo_pipefail() { + let info = RollbackInfo { + strategy: "manual".to_string(), + command_hint: "ssh ...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Hetzner, &DeployTarget::Vps, &info); + assert!(script.contains("set -euo pipefail")); + } + + #[test] + fn script_accepts_prev_tag_argument() { + let info = RollbackInfo { + strategy: "manual".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Azure, &DeployTarget::AppService, &info); + assert!(script.contains("PREV_TAG=\"${1:?")); + } + + #[test] + fn azure_app_service_script_uses_az_webapp() { + let info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "...".to_string(), + }; + let script = + 
generate_rollback_script(&CdPlatform::Azure, &DeployTarget::AppService, &info); + assert!(script.contains("az webapp config container set")); + } + + #[test] + fn aks_script_uses_kubectl_rollout() { + let info = RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Azure, &DeployTarget::Aks, &info); + assert!(script.contains("kubectl rollout undo")); + } + + #[test] + fn cloud_run_script_uses_gcloud() { + let info = RollbackInfo { + strategy: "traffic-shift".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Gcp, &DeployTarget::CloudRun, &info); + assert!(script.contains("gcloud run services update-traffic")); + } + + #[test] + fn gke_script_uses_kubectl() { + let info = RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Gcp, &DeployTarget::Gke, &info); + assert!(script.contains("kubectl rollout undo")); + } + + #[test] + fn vps_script_uses_ssh() { + let info = RollbackInfo { + strategy: "manual".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Hetzner, &DeployTarget::Vps, &info); + assert!(script.contains("ssh \"${SSH_USER")); + } + + #[test] + fn coolify_script_uses_webhook() { + let info = RollbackInfo { + strategy: "manual".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Hetzner, &DeployTarget::Coolify, &info); + assert!(script.contains("COOLIFY_WEBHOOK_URL")); + } + + #[test] + fn hetzner_k8s_script_uses_kubectl() { + let info = RollbackInfo { + strategy: "rollout-undo".to_string(), + command_hint: "...".to_string(), + }; + let script = + generate_rollback_script(&CdPlatform::Hetzner, &DeployTarget::HetznerK8s, &info); + assert!(script.contains("kubectl rollout undo")); + } + + #[test] + fn 
yaml_comment_contains_strategy() { + let info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp ...".to_string(), + }; + let yaml = render_rollback_yaml_comment(&info); + assert!(yaml.contains("Strategy: redeploy-previous")); + } + + #[test] + fn yaml_comment_references_script() { + let info = RollbackInfo { + strategy: "manual".to_string(), + command_hint: "...".to_string(), + }; + let yaml = render_rollback_yaml_comment(&info); + assert!(yaml.contains(".syncable/scripts/rollback.sh")); + } + + #[test] + fn rollback_script_path_is_correct() { + assert_eq!(rollback_script_path(), ".syncable/scripts/rollback.sh"); + } + + #[test] + fn container_apps_script_uses_az_containerapp() { + let info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "...".to_string(), + }; + let script = generate_rollback_script( + &CdPlatform::Azure, + &DeployTarget::ContainerApps, + &info, + ); + assert!(script.contains("az containerapp update")); + } +} diff --git a/src/generator/cd_generation/schema.rs b/src/generator/cd_generation/schema.rs new file mode 100644 index 00000000..893b62f9 --- /dev/null +++ b/src/generator/cd_generation/schema.rs @@ -0,0 +1,589 @@ +//! CD Pipeline Schema — CD-17 +//! +//! Defines the canonical, platform-agnostic `CdPipeline` intermediate +//! representation. Template builders (CD-18, CD-19, CD-20) render YAML +//! from this struct, not directly from `CdContext`. This mirrors the CI +//! schema pattern: context collection → schema → template rendering. + +use serde::Serialize; + +use super::context::{CdPlatform, DeployTarget, MigrationTool, Registry}; + +// ── Unresolved token ────────────────────────────────────────────────────────── + +/// A placeholder that could not be filled deterministically from project files. +/// +/// Serialised into `cd-manifest.toml [unresolved]` so the agent fill phase +/// and interactive prompts know exactly what still needs a human decision. 
+#[derive(Debug, Clone, Serialize, PartialEq, Eq)]
+pub struct UnresolvedToken {
+    /// Token name as it appears in the YAML output, e.g. `"REGISTRY_URL"`.
+    pub name: String,
+    /// The `{{TOKEN_NAME}}` string injected into the generated YAML.
+    pub placeholder: String,
+    /// Human-readable hint for what value to supply.
+    pub hint: String,
+    /// Type annotation used in the manifest file (e.g. `"string"`, `"url"`).
+    pub token_type: String,
+}
+
+impl UnresolvedToken {
+    pub fn new(name: &str, hint: &str, token_type: &str) -> Self {
+        Self {
+            name: name.to_string(),
+            placeholder: format!("{{{{{}}}}}", name),
+            hint: hint.to_string(),
+            token_type: token_type.to_string(),
+        }
+    }
+}
+
+// ── Step structs ──────────────────────────────────────────────────────────────
+
+/// Cloud provider authentication step.
+///
+/// Azure uses OIDC federation, GCP uses Workload Identity Federation,
+/// Hetzner uses SSH keys or API tokens.
+#[derive(Debug, Clone, Serialize)]
+pub struct AuthStep {
+    /// GitHub Actions action, e.g. `"azure/login@v2"` or `"google-github-actions/auth@v2"`.
+    pub action: Option<String>,
+    /// Method description: `"oidc"`, `"workload-identity"`, `"ssh"`, `"api-token"`.
+    pub method: String,
+    /// Secrets that must be configured in the repo (e.g. `"AZURE_CLIENT_ID"`).
+    pub required_secrets: Vec<String>,
+}
+
+/// Container registry login step.
+#[derive(Debug, Clone, Serialize)]
+pub struct RegistryStep {
+    /// Registry type from context.
+    pub registry: Registry,
+    /// Login action, e.g. `"docker/login-action@v3"` or a shell command.
+    pub login_action: Option<String>,
+    /// Full registry URL or placeholder, e.g. `"ghcr.io"` or `"{{REGISTRY_URL}}"`.
+    pub registry_url: String,
+}
+
+/// Docker build and push step.
+#[derive(Debug, Clone, Serialize)]
+pub struct DockerBuildPushStep {
+    /// Full image reference including registry and tag placeholder.
+    /// e.g. `"ghcr.io/org/app:${{ github.sha }}"`.
+    pub image_tag: String,
+    /// Build context path relative to repo root.
+    pub context: String,
+    /// Dockerfile path relative to repo root.
+    pub dockerfile: String,
+    /// Whether to push the image (always `true` for CD).
+    pub push: bool,
+    /// Enable multi-platform via `docker/setup-buildx-action`.
+    pub buildx: bool,
+    /// Build arguments to pass, e.g. `["BUILD_ENV=production"]`.
+    pub build_args: Vec<String>,
+}
+
+/// Database migration step — omitted when no migration tool detected.
+#[derive(Debug, Clone, Serialize)]
+pub struct MigrationStep {
+    /// Tool name for logging and comments.
+    pub tool: MigrationTool,
+    /// Shell command to run migrations.
+    /// e.g. `"npx prisma migrate deploy"`, `"alembic upgrade head"`.
+    pub command: String,
+    /// Whether migration runs via SSH (Hetzner VPS pattern).
+    pub via_ssh: bool,
+}
+
+/// Terraform plan + apply step — omitted when no terraform directory found.
+#[derive(Debug, Clone, Serialize)]
+pub struct TerraformStep {
+    /// Working directory for `terraform` commands.
+    pub working_directory: String,
+    /// Version of Terraform to set up, or `{{TERRAFORM_VERSION}}`.
+    pub version: String,
+    /// Backend configuration arguments (e.g. `["-backend-config=env/prod.hcl"]`).
+    pub backend_config: Vec<String>,
+    /// Whether to auto-approve `terraform apply` (typically only in non-prod).
+    pub auto_approve: bool,
+}
+
+/// Platform-specific deployment step.
+#[derive(Debug, Clone, Serialize)]
+pub struct DeployStep {
+    /// Human-readable strategy label: `"rolling"`, `"blue-green"`, `"canary"`, `"recreate"`.
+    pub strategy: String,
+    /// Primary deploy command or action.
+    pub command: String,
+    /// Additional arguments for the deploy command.
+    pub args: Vec<String>,
+    /// The deployment target for reference.
+    pub target: DeployTarget,
+}
+
+/// Post-deployment health check step.
+#[derive(Debug, Clone, Serialize)]
+pub struct HealthCheckStep {
+    /// URL to probe, e.g. `"https://{{APP_URL}}/health"`.
+    pub url: String,
+    /// Maximum number of retry attempts.
+    pub retries: u32,
+    /// Delay between retries in seconds.
+    pub interval_secs: u32,
+    /// Expected HTTP status code (typically `200`).
+    pub expected_status: u16,
+}
+
+/// Rollback metadata — not an executable step, but information baked into
+/// the generated YAML comments and manifest.
+#[derive(Debug, Clone, Serialize)]
+pub struct RollbackInfo {
+    /// Rollback strategy description: `"redeploy-previous"`, `"helm-rollback"`, `"manual"`.
+    pub strategy: String,
+    /// Shell command suggestion for manual rollback.
+    pub command_hint: String,
+}
+
+/// Slack (or other) deployment notification step.
+#[derive(Debug, Clone, Serialize)]
+pub struct NotificationStep {
+    /// Channel or webhook approach: `"slack-webhook"`, `"teams-webhook"`.
+    pub channel_type: String,
+    /// Secret name for the webhook URL, e.g. `"SLACK_WEBHOOK_URL"`.
+    pub webhook_secret: String,
+    /// Whether to send on success.
+    pub on_success: bool,
+    /// Whether to send on failure.
+    pub on_failure: bool,
+}
+
+/// Per-environment configuration used when rendering per-env deploy jobs.
+#[derive(Debug, Clone, Serialize)]
+pub struct EnvironmentConfig {
+    /// Environment name: `"dev"`, `"staging"`, `"production"`.
+    pub name: String,
+    /// Branch or tag filter for this environment.
+    pub branch_filter: Option<String>,
+    /// Whether this environment requires a GitHub environment protection rule
+    /// (manual approval).
+    pub requires_approval: bool,
+    /// URL of the running application in this environment, or placeholder.
+    pub app_url: Option<String>,
+    /// Optional Kubernetes namespace override.
+    pub namespace: Option<String>,
+    /// Optional replica count override for this environment.
+    pub replicas: Option<u32>,
+}
+
+// ── Top-level pipeline ────────────────────────────────────────────────────────
+
+/// Platform-agnostic intermediate representation of a complete CD pipeline.
+///
+/// Template builders (CD-18, CD-19, CD-20) render YAML from this struct.
+/// The agent fill phase patches individual fields without re-running full
+/// context collection.
+#[derive(Debug, Clone, Serialize)]
+pub struct CdPipeline {
+    /// Human-readable project name.
+    pub project_name: String,
+    /// Target cloud platform.
+    pub platform: CdPlatform,
+    /// Concrete deployment target.
+    pub deploy_target: DeployTarget,
+    /// Ordered list of environment configs (dev → staging → production).
+    pub environments: Vec<EnvironmentConfig>,
+    /// Cloud provider authentication step.
+    pub auth: AuthStep,
+    /// Container registry login step.
+    pub registry: RegistryStep,
+    /// Docker build and push step.
+    pub docker_build_push: DockerBuildPushStep,
+    /// Database migration step (omitted if not detected).
+    pub migration: Option<MigrationStep>,
+    /// Terraform step (omitted if not detected).
+    pub terraform: Option<TerraformStep>,
+    /// Deployment step.
+    pub deploy: DeployStep,
+    /// Post-deployment health check.
+    pub health_check: HealthCheckStep,
+    /// Rollback info baked into manifest and YAML comments.
+    pub rollback_info: RollbackInfo,
+    /// Optional deployment notification step.
+    pub notifications: Option<NotificationStep>,
+    /// Tokens that could not be resolved deterministically.
+    pub unresolved_tokens: Vec<UnresolvedToken>,
+    /// Default git branch.
+    pub default_branch: String,
+    /// Docker image name (without registry or tag).
+ pub image_name: String, +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::context::{ + CdPlatform, DeployTarget, MigrationTool, Registry, + }; + + #[test] + fn unresolved_token_new_formats_placeholder() { + let token = UnresolvedToken::new("REGISTRY_URL", "Your ACR login server", "url"); + assert_eq!(token.name, "REGISTRY_URL"); + assert_eq!(token.placeholder, "{{REGISTRY_URL}}"); + assert_eq!(token.hint, "Your ACR login server"); + assert_eq!(token.token_type, "url"); + } + + #[test] + fn auth_step_azure_oidc() { + let step = AuthStep { + action: Some("azure/login@v2".to_string()), + method: "oidc".to_string(), + required_secrets: vec![ + "AZURE_CLIENT_ID".to_string(), + "AZURE_TENANT_ID".to_string(), + "AZURE_SUBSCRIPTION_ID".to_string(), + ], + }; + assert_eq!(step.method, "oidc"); + assert_eq!(step.required_secrets.len(), 3); + } + + #[test] + fn auth_step_gcp_workload_identity() { + let step = AuthStep { + action: Some("google-github-actions/auth@v2".to_string()), + method: "workload-identity".to_string(), + required_secrets: vec![ + "GCP_WORKLOAD_IDENTITY_PROVIDER".to_string(), + "GCP_SERVICE_ACCOUNT".to_string(), + ], + }; + assert_eq!(step.method, "workload-identity"); + assert_eq!(step.required_secrets.len(), 2); + } + + #[test] + fn auth_step_hetzner_ssh() { + let step = AuthStep { + action: None, + method: "ssh".to_string(), + required_secrets: vec![ + "SSH_PRIVATE_KEY".to_string(), + "SSH_HOST".to_string(), + ], + }; + assert!(step.action.is_none()); + assert_eq!(step.method, "ssh"); + } + + #[test] + fn registry_step_ghcr() { + let step = RegistryStep { + registry: Registry::Ghcr, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "ghcr.io".to_string(), + }; + assert_eq!(step.registry_url, "ghcr.io"); + } + + #[test] + fn registry_step_acr_with_placeholder() { + let step = RegistryStep { + registry: 
Registry::Acr, + login_action: Some("azure/docker-login@v2".to_string()), + registry_url: "{{ACR_LOGIN_SERVER}}".to_string(), + }; + assert!(step.registry_url.contains("{{")); + } + + #[test] + fn docker_build_push_step() { + let step = DockerBuildPushStep { + image_tag: "ghcr.io/org/app:abc123".to_string(), + context: ".".to_string(), + dockerfile: "Dockerfile".to_string(), + push: true, + buildx: false, + build_args: vec!["BUILD_ENV=production".to_string()], + }; + assert!(step.push); + assert!(!step.buildx); + assert_eq!(step.build_args.len(), 1); + } + + #[test] + fn migration_step_prisma() { + let step = MigrationStep { + tool: MigrationTool::Prisma, + command: "npx prisma migrate deploy".to_string(), + via_ssh: false, + }; + assert_eq!(step.tool, MigrationTool::Prisma); + assert!(!step.via_ssh); + } + + #[test] + fn migration_step_via_ssh() { + let step = MigrationStep { + tool: MigrationTool::Alembic, + command: "ssh deploy@host 'cd /app && alembic upgrade head'".to_string(), + via_ssh: true, + }; + assert!(step.via_ssh); + } + + #[test] + fn terraform_step_defaults() { + let step = TerraformStep { + working_directory: "terraform/".to_string(), + version: "{{TERRAFORM_VERSION}}".to_string(), + backend_config: vec![], + auto_approve: false, + }; + assert!(!step.auto_approve); + assert!(step.version.contains("{{")); + } + + #[test] + fn deploy_step_cloud_run() { + let step = DeployStep { + strategy: "rolling".to_string(), + command: "gcloud run deploy".to_string(), + args: vec![ + "--image={{IMAGE_TAG}}".to_string(), + "--region={{GCP_REGION}}".to_string(), + ], + target: DeployTarget::CloudRun, + }; + assert_eq!(step.strategy, "rolling"); + assert_eq!(step.target, DeployTarget::CloudRun); + } + + #[test] + fn health_check_step_defaults() { + let step = HealthCheckStep { + url: "https://{{APP_URL}}/health".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }; + assert_eq!(step.retries, 5); + assert_eq!(step.expected_status, 200); + } + + 
#[test] + fn rollback_info() { + let info = RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp deployment slot swap --slot staging".to_string(), + }; + assert_eq!(info.strategy, "redeploy-previous"); + } + + #[test] + fn notification_step_slack() { + let step = NotificationStep { + channel_type: "slack-webhook".to_string(), + webhook_secret: "SLACK_WEBHOOK_URL".to_string(), + on_success: true, + on_failure: true, + }; + assert!(step.on_success); + assert!(step.on_failure); + } + + #[test] + fn environment_config_production_with_approval() { + let env = EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: true, + app_url: Some("https://myapp.com".to_string()), + namespace: Some("prod".to_string()), + replicas: Some(3), + }; + assert!(env.requires_approval); + assert_eq!(env.replicas, Some(3)); + } + + #[test] + fn environment_config_dev_no_approval() { + let env = EnvironmentConfig { + name: "dev".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }; + assert!(!env.requires_approval); + assert!(env.app_url.is_none()); + } + + #[test] + fn cd_pipeline_full_assembly() { + let pipeline = CdPipeline { + project_name: "my-app".to_string(), + platform: CdPlatform::Azure, + deploy_target: DeployTarget::ContainerApps, + environments: vec![ + EnvironmentConfig { + name: "dev".to_string(), + branch_filter: Some("develop".to_string()), + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }, + EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: true, + app_url: Some("https://my-app.azurewebsites.net".to_string()), + namespace: None, + replicas: Some(2), + }, + ], + auth: AuthStep { + action: Some("azure/login@v2".to_string()), + method: "oidc".to_string(), + required_secrets: vec![ + 
"AZURE_CLIENT_ID".to_string(), + "AZURE_TENANT_ID".to_string(), + "AZURE_SUBSCRIPTION_ID".to_string(), + ], + }, + registry: RegistryStep { + registry: Registry::Acr, + login_action: Some("azure/docker-login@v2".to_string()), + registry_url: "{{ACR_LOGIN_SERVER}}".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "{{ACR_LOGIN_SERVER}}/my-app:abc123".to_string(), + context: ".".to_string(), + dockerfile: "Dockerfile".to_string(), + push: true, + buildx: false, + build_args: vec![], + }, + migration: Some(MigrationStep { + tool: MigrationTool::Prisma, + command: "npx prisma migrate deploy".to_string(), + via_ssh: false, + }), + terraform: None, + deploy: DeployStep { + strategy: "rolling".to_string(), + command: "az containerapp update".to_string(), + args: vec![ + "--name={{APP_NAME}}".to_string(), + "--resource-group={{RESOURCE_GROUP}}".to_string(), + ], + target: DeployTarget::ContainerApps, + }, + health_check: HealthCheckStep { + url: "https://{{APP_URL}}/health".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az containerapp revision activate --revision ".to_string(), + }, + notifications: Some(NotificationStep { + channel_type: "slack-webhook".to_string(), + webhook_secret: "SLACK_WEBHOOK_URL".to_string(), + on_success: true, + on_failure: true, + }), + unresolved_tokens: vec![ + UnresolvedToken::new("ACR_LOGIN_SERVER", "Your Azure Container Registry login server URL", "url"), + UnresolvedToken::new("APP_URL", "Public URL of your application", "url"), + UnresolvedToken::new("APP_NAME", "Azure Container App name", "string"), + UnresolvedToken::new("RESOURCE_GROUP", "Azure resource group name", "string"), + ], + default_branch: "main".to_string(), + image_name: "my-app".to_string(), + }; + + assert_eq!(pipeline.project_name, "my-app"); + assert_eq!(pipeline.environments.len(), 2); + 
assert!(pipeline.migration.is_some()); + assert!(pipeline.terraform.is_none()); + assert!(pipeline.notifications.is_some()); + assert_eq!(pipeline.unresolved_tokens.len(), 4); + assert_eq!(pipeline.default_branch, "main"); + } + + #[test] + fn cd_pipeline_minimal_hetzner_vps() { + let pipeline = CdPipeline { + project_name: "simple-api".to_string(), + platform: CdPlatform::Hetzner, + deploy_target: DeployTarget::Vps, + environments: vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }], + auth: AuthStep { + action: None, + method: "ssh".to_string(), + required_secrets: vec![ + "SSH_PRIVATE_KEY".to_string(), + "SSH_HOST".to_string(), + ], + }, + registry: RegistryStep { + registry: Registry::Ghcr, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "ghcr.io".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "ghcr.io/user/simple-api:latest".to_string(), + context: ".".to_string(), + dockerfile: "Dockerfile".to_string(), + push: true, + buildx: false, + build_args: vec![], + }, + migration: None, + terraform: None, + deploy: DeployStep { + strategy: "recreate".to_string(), + command: "ssh deploy@host 'docker compose pull && docker compose up -d'".to_string(), + args: vec![], + target: DeployTarget::Vps, + }, + health_check: HealthCheckStep { + url: "http://{{SSH_HOST}}:8080/health".to_string(), + retries: 3, + interval_secs: 5, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "manual".to_string(), + command_hint: "ssh deploy@host 'docker compose down && docker compose up -d'".to_string(), + }, + notifications: None, + unresolved_tokens: vec![ + UnresolvedToken::new("SSH_HOST", "IP or hostname of your Hetzner VPS", "string"), + ], + default_branch: "main".to_string(), + image_name: "simple-api".to_string(), + }; + + assert_eq!(pipeline.platform, 
CdPlatform::Hetzner); + assert_eq!(pipeline.deploy_target, DeployTarget::Vps); + assert!(pipeline.migration.is_none()); + assert!(pipeline.notifications.is_none()); + assert_eq!(pipeline.unresolved_tokens.len(), 1); + } +} diff --git a/src/generator/cd_generation/secrets_doc.rs b/src/generator/cd_generation/secrets_doc.rs new file mode 100644 index 00000000..5ad19205 --- /dev/null +++ b/src/generator/cd_generation/secrets_doc.rs @@ -0,0 +1,324 @@ +//! CD-27/28 — CD Secrets Inventory & Hetzner Prerequisites +//! +//! Scans a rendered CD pipeline YAML for `secrets.*` references, deduplicates +//! them, and formats the CD section of `SECRETS_REQUIRED.md`. +//! +//! For Hetzner targets, appends a firewall & network prerequisites checklist +//! (CD-28) so the user knows about SSH keys, firewall rules, and Docker setup. + +use std::collections::BTreeSet; + +use crate::generator::cd_generation::context::CdPlatform; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Scans `yaml` for secret references and returns the CD portion of the +/// secrets document. +pub fn generate_cd_secrets_doc(yaml: &str, platform: &CdPlatform) -> String { + let names = collect_cd_secret_names(yaml); + let mut doc = render_cd_secrets_table(&names, platform); + + // CD-28: append Hetzner prerequisites when applicable + if *platform == CdPlatform::Hetzner { + doc.push_str(&hetzner_prerequisites_checklist()); + } + + doc +} + +/// Collects all secret names referenced in a CD pipeline YAML. 
+///
+/// Matches patterns like:
+/// - `${{ secrets.FOO }}`
+/// - `secrets.FOO`
+pub fn collect_cd_secret_names(yaml: &str) -> BTreeSet<String> {
+    let mut names = BTreeSet::new();
+
+    // Pattern: secrets.NAME — skip the first segment (text before the first match)
+    let segments: Vec<&str> = yaml.split("secrets.").collect();
+    for segment in segments.iter().skip(1) {
+        if let Some(name) = extract_secret_name(segment) {
+            names.insert(name);
+        }
+    }
+
+    names
+}
+
+// ── Render ────────────────────────────────────────────────────────────────────
+
+fn render_cd_secrets_table(names: &BTreeSet<String>, platform: &CdPlatform) -> String {
+    if names.is_empty() {
+        return "No CD secrets detected in the generated pipeline.\n".to_string();
+    }
+
+    let mut md = String::new();
+    md.push_str("| Secret | Description | How to Obtain |\n");
+    md.push_str("|--------|-------------|---------------|\n");
+
+    for name in names {
+        let (desc, how) = secret_metadata(name, platform);
+        md.push_str(&format!("| `{name}` | {desc} | {how} |\n"));
+    }
+
+    md
+}
+
+/// Returns (description, how_to_obtain) for well-known CD secrets.
+fn secret_metadata(name: &str, platform: &CdPlatform) -> (&'static str, &'static str) { + match name { + // Azure + "AZURE_CLIENT_ID" => ( + "Azure AD App Registration client ID", + "`az ad app create --display-name ` → appId", + ), + "AZURE_TENANT_ID" => ( + "Azure AD tenant ID", + "`az account show` → tenantId", + ), + "AZURE_SUBSCRIPTION_ID" => ( + "Azure subscription ID", + "`az account show` → id", + ), + "ACR_LOGIN_SERVER" => ( + "Azure Container Registry login server", + "`az acr show --name --query loginServer`", + ), + // GCP + "GCP_PROJECT_ID" => ( + "GCP project ID", + "`gcloud config get-value project`", + ), + "GCP_WORKLOAD_IDENTITY_PROVIDER" => ( + "Workload Identity Federation provider", + "IAM → Workload Identity Pools → Provider", + ), + "GCP_SERVICE_ACCOUNT" => ( + "GCP service account email", + "`gcloud iam service-accounts list`", + ), + "GAR_LOCATION" => ( + "Google Artifact Registry location", + "e.g. `us-central1`, `europe-west1`", + ), + // Hetzner + "SSH_PRIVATE_KEY" => ( + "SSH private key for VPS access", + "`ssh-keygen -t ed25519` → add public key to Hetzner project", + ), + "DEPLOY_HOST" => ( + "VPS hostname or IP address", + "Hetzner Cloud Console → Server → IP", + ), + "DEPLOY_USER" => ( + "SSH user on the target server", + "Typically `root` or a deploy user", + ), + "KUBECONFIG_DATA" => ( + "Base64-encoded kubeconfig for k8s cluster", + "`cat kubeconfig | base64`", + ), + // Notifications + "SLACK_WEBHOOK_URL" => ( + "Slack incoming webhook URL", + "Slack API → Incoming Webhooks → Create", + ), + // Registry (generic) + "GHCR_TOKEN" | "CR_PAT" => ( + "GitHub Container Registry personal access token", + "GitHub Settings → Developer → PAT → `write:packages`", + ), + // Fallback + _ => match platform { + CdPlatform::Azure => ( + "Azure-specific secret", + "Azure Portal → App Registrations / Key Vault", + ), + CdPlatform::Gcp => ( + "GCP-specific secret", + "GCP Console → Secret Manager", + ), + CdPlatform::Hetzner => ( + 
"Hetzner/deployment secret", + "Hetzner Cloud Console or SSH key management", + ), + }, + } +} + +/// CD-28 — Hetzner firewall & network prerequisites checklist. +fn hetzner_prerequisites_checklist() -> String { + "\n### Hetzner Prerequisites Checklist\n\n\ + Before deploying to Hetzner, ensure the following are configured:\n\n\ + - [ ] **SSH key** added to your Hetzner project (Cloud Console → SSH Keys)\n\ + - [ ] **Firewall rules** configured:\n\ + - Port 22 (SSH) — for deployment access\n\ + - Port 80 (HTTP) — for web traffic\n\ + - Port 443 (HTTPS) — for secure web traffic\n\ + - Port 6443 (K8s API) — if using Kubernetes\n\ + - [ ] **Docker installed** on the target VPS (`curl -fsSL https://get.docker.com | sh`)\n\ + - [ ] **Docker Compose** installed (or use Docker Swarm mode)\n\ + - [ ] **Deploy user** created with Docker group membership (`usermod -aG docker deploy`)\n\ + - [ ] **DNS** configured to point to the server IP\n" + .to_string() +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Extracts a secret name from text immediately following `secrets.`. 
+fn extract_secret_name(after_dot: &str) -> Option { + let name: String = after_dot + .chars() + .take_while(|c| c.is_ascii_alphanumeric() || *c == '_') + .collect(); + + if name.is_empty() { + None + } else { + Some(name) + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn collect_secrets_from_yaml() { + let yaml = r#" + env: + TOKEN: ${{ secrets.AZURE_CLIENT_ID }} + OTHER: ${{ secrets.AZURE_TENANT_ID }} + "#; + let names = collect_cd_secret_names(yaml); + assert!(names.contains("AZURE_CLIENT_ID")); + assert!(names.contains("AZURE_TENANT_ID")); + } + + #[test] + fn collect_deduplicates() { + let yaml = "secrets.FOO and secrets.FOO again"; + let names = collect_cd_secret_names(yaml); + assert_eq!(names.len(), 1); + } + + #[test] + fn collect_empty_yaml() { + let names = collect_cd_secret_names("no secrets here"); + assert!(names.is_empty()); + } + + #[test] + fn generate_doc_azure() { + let yaml = "${{ secrets.AZURE_CLIENT_ID }}"; + let doc = generate_cd_secrets_doc(yaml, &CdPlatform::Azure); + assert!(doc.contains("AZURE_CLIENT_ID")); + assert!(doc.contains("Azure AD")); + assert!(!doc.contains("Hetzner Prerequisites")); + } + + #[test] + fn generate_doc_gcp() { + let yaml = "${{ secrets.GCP_PROJECT_ID }}"; + let doc = generate_cd_secrets_doc(yaml, &CdPlatform::Gcp); + assert!(doc.contains("GCP_PROJECT_ID")); + } + + #[test] + fn generate_doc_hetzner_includes_checklist() { + let yaml = "${{ secrets.SSH_PRIVATE_KEY }}"; + let doc = generate_cd_secrets_doc(yaml, &CdPlatform::Hetzner); + assert!(doc.contains("SSH_PRIVATE_KEY")); + assert!(doc.contains("Hetzner Prerequisites Checklist")); + assert!(doc.contains("Port 22")); + assert!(doc.contains("Docker installed")); + } + + #[test] + fn generate_doc_no_secrets() { + let doc = generate_cd_secrets_doc("just yaml", &CdPlatform::Azure); + assert!(doc.contains("No CD secrets detected")); + } + + #[test] + fn 
extract_secret_name_valid() { + assert_eq!( + extract_secret_name("FOO_BAR }}"), + Some("FOO_BAR".to_string()) + ); + } + + #[test] + fn extract_secret_name_empty() { + assert_eq!(extract_secret_name(" not a name"), None); + } + + #[test] + fn hetzner_checklist_content() { + let checklist = hetzner_prerequisites_checklist(); + assert!(checklist.contains("SSH key")); + assert!(checklist.contains("Port 6443")); + assert!(checklist.contains("Docker Compose")); + assert!(checklist.contains("DNS")); + } + + #[test] + fn metadata_azure_known_secret() { + let (desc, _) = secret_metadata("AZURE_CLIENT_ID", &CdPlatform::Azure); + assert!(desc.contains("Azure AD")); + } + + #[test] + fn metadata_gcp_known_secret() { + let (desc, _) = secret_metadata("GCP_PROJECT_ID", &CdPlatform::Gcp); + assert!(desc.contains("GCP project")); + } + + #[test] + fn metadata_hetzner_known_secret() { + let (desc, _) = secret_metadata("SSH_PRIVATE_KEY", &CdPlatform::Hetzner); + assert!(desc.contains("SSH private key")); + } + + #[test] + fn metadata_slack_secret() { + let (desc, _) = secret_metadata("SLACK_WEBHOOK_URL", &CdPlatform::Azure); + assert!(desc.contains("Slack")); + } + + #[test] + fn metadata_unknown_secret_azure() { + let (desc, _) = secret_metadata("CUSTOM_SECRET", &CdPlatform::Azure); + assert!(desc.contains("Azure")); + } + + #[test] + fn metadata_unknown_secret_gcp() { + let (desc, _) = secret_metadata("CUSTOM_SECRET", &CdPlatform::Gcp); + assert!(desc.contains("GCP")); + } + + #[test] + fn metadata_unknown_secret_hetzner() { + let (desc, _) = secret_metadata("CUSTOM_SECRET", &CdPlatform::Hetzner); + assert!(desc.contains("Hetzner")); + } + + #[test] + fn table_format_has_header() { + let mut names = BTreeSet::new(); + names.insert("FOO".to_string()); + let table = render_cd_secrets_table(&names, &CdPlatform::Azure); + assert!(table.contains("| Secret |")); + assert!(table.contains("| `FOO` |")); + } + + #[test] + fn collect_multiple_distinct_secrets() { + let yaml = "secrets.A 
and secrets.B and secrets.C"; + let names = collect_cd_secret_names(yaml); + assert_eq!(names.len(), 3); + } +} diff --git a/src/generator/cd_generation/templates/azure.rs b/src/generator/cd_generation/templates/azure.rs new file mode 100644 index 00000000..4f5f114e --- /dev/null +++ b/src/generator/cd_generation/templates/azure.rs @@ -0,0 +1,440 @@ +//! CD-18 — Azure CD Template Builder +//! +//! Assembles a complete `.github/workflows/deploy-azure.yml` from a +//! `CdPipeline` struct. The pipeline order is: +//! +//! 1. Checkout +//! 2. Azure login (OIDC) +//! 3. Docker build + push (via Buildx) +//! 4. Database migration *(optional)* +//! 5. Deploy to target (App Service / AKS / Container Apps) +//! 6. Health check +//! +//! All YAML is assembled by string formatting from the `CdPipeline` schema +//! types. Template builders own the full workflow structure; step modules +//! provide helpers for individual snippets at a lower level. + +use crate::generator::cd_generation::{ + context::DeployTarget, + health_check::is_kubectl_health_check, + schema::CdPipeline, +}; + +/// Renders a complete Azure CD workflow YAML string. +pub fn render(pipeline: &CdPipeline) -> String { + let mut yaml = String::with_capacity(4096); + + yaml.push_str(&render_header(pipeline)); + yaml.push_str("jobs:\n"); + yaml.push_str(" deploy:\n"); + yaml.push_str(" runs-on: ubuntu-latest\n"); + yaml.push_str(" steps:\n"); + + // 1. Checkout + yaml.push_str(" - uses: actions/checkout@v4\n\n"); + + // 2. Azure login (OIDC) + yaml.push_str(&render_auth_step(pipeline)); + + // 3. Docker build + push + yaml.push_str(&render_docker_step(pipeline)); + + // 4. Migration (optional) + if let Some(ref migration) = pipeline.migration { + yaml.push_str(&render_migration_step(migration)); + } + + // 5. Deploy + yaml.push_str(&render_deploy_step(pipeline)); + + // 6. 
Health check + yaml.push_str(&render_health_check_step(pipeline)); + + // Rollback comment + yaml.push_str(&render_rollback_comment(pipeline)); + + yaml +} + +/// Returns the canonical output filename. +pub fn workflow_filename() -> &'static str { + "deploy-azure.yml" +} + +// ── Private renderers ───────────────────────────────────────────────────────── + +fn render_header(pipeline: &CdPipeline) -> String { + format!( + "\ +# Auto-generated by sync-ctl — Azure CD pipeline for {project} +# Target: {target} +name: Deploy to Azure ({target}) + +on: + push: + branches: + - {branch} + workflow_dispatch: + +permissions: + id-token: write + contents: read + +env: + IMAGE_NAME: {image} + +", + project = pipeline.project_name, + target = pipeline.deploy_target, + branch = pipeline.default_branch, + image = pipeline.image_name, + ) +} + +fn render_auth_step(pipeline: &CdPipeline) -> String { + let action = pipeline + .auth + .action + .as_deref() + .unwrap_or("azure/login@v2"); + + format!( + " - name: Azure login (OIDC) + uses: {action} + with: + client-id: ${{{{ secrets.AZURE_CLIENT_ID }}}} + tenant-id: ${{{{ secrets.AZURE_TENANT_ID }}}} + subscription-id: ${{{{ secrets.AZURE_SUBSCRIPTION_ID }}}}\n\n", + ) +} + +fn render_docker_step(pipeline: &CdPipeline) -> String { + format!( + " - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: {context} + file: {dockerfile} + push: true + tags: {image_tag} + cache-from: type=gha + cache-to: type=gha,mode=max\n\n", + context = pipeline.docker_build_push.context, + dockerfile = pipeline.docker_build_push.dockerfile, + image_tag = pipeline.docker_build_push.image_tag, + ) +} + +fn render_migration_step( + migration: &crate::generator::cd_generation::schema::MigrationStep, +) -> String { + if migration.via_ssh { + format!( + " - name: Run database migrations ({tool}) via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ 
secrets.SSH_HOST }}}} << 'MIGRATE_EOF' + cd /opt/app && {command} + MIGRATE_EOF + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } else { + format!( + " - name: Run database migrations ({tool}) + run: {command} + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } +} + +fn render_deploy_step(pipeline: &CdPipeline) -> String { + match pipeline.deploy_target { + DeployTarget::AppService => format!( + " - name: Deploy to Azure App Service + uses: azure/webapps-deploy@v3 + with: + app-name: ${{{{ secrets.AZURE_APP_NAME }}}} + images: {image_tag}\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + DeployTarget::Aks => format!( + " - name: Set AKS context + uses: azure/aks-set-context@v4 + with: + resource-group: ${{{{ secrets.AKS_RESOURCE_GROUP }}}} + cluster-name: ${{{{ secrets.AKS_CLUSTER_NAME }}}} + + - name: Deploy to AKS + uses: azure/k8s-deploy@v5 + with: + namespace: ${{{{ secrets.K8S_NAMESPACE }}}} + images: {image_tag} + manifests: | + k8s/\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + DeployTarget::ContainerApps => format!( + " - name: Deploy to Azure Container Apps + uses: azure/container-apps-deploy@v2 + with: + containerAppName: ${{{{ secrets.CONTAINER_APP_NAME }}}} + resourceGroup: ${{{{ secrets.AZURE_RESOURCE_GROUP }}}} + imageToDeploy: {image_tag}\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + _ => format!( + " - name: Deploy ({target}) + run: echo 'Deploy step for {target} — customize this step' + env: + IMAGE_TAG: {image_tag}\n\n", + target = pipeline.deploy_target, + image_tag = pipeline.docker_build_push.image_tag, + ), + } +} + +fn render_health_check_step(pipeline: &CdPipeline) -> String { + if is_kubectl_health_check(&pipeline.deploy_target) { + let timeout = pipeline.health_check.retries * pipeline.health_check.interval_secs; + format!( + " - name: Health 
check — rollout status + run: | + kubectl rollout status deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout={timeout}s\n\n", + ) + } else { + format!( + " - name: Health check + run: | + curl --fail \\ + --retry {retries} \\ + --retry-delay {interval} \\ + --retry-all-errors \\ + -o /dev/null -s -w '%{{http_code}}' \\ + {url}\n\n", + retries = pipeline.health_check.retries, + interval = pipeline.health_check.interval_secs, + url = pipeline.health_check.url, + ) + } +} + +fn render_rollback_comment(pipeline: &CdPipeline) -> String { + format!( + "\ +# ── Rollback ────────────────────────────────────────────────── +# Strategy: {strategy} +# Command: {command} +", + strategy = pipeline.rollback_info.strategy, + command = pipeline.rollback_info.command_hint, + ) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::{ + context::{CdPlatform, DeployTarget, MigrationTool, Registry}, + schema::*, + }; + + fn sample_pipeline(target: DeployTarget, with_migration: bool) -> CdPipeline { + CdPipeline { + project_name: "my-app".to_string(), + platform: CdPlatform::Azure, + deploy_target: target.clone(), + environments: vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: None, + requires_approval: true, + app_url: Some("https://my-app.azurewebsites.net".to_string()), + namespace: None, + replicas: None, + }], + auth: AuthStep { + action: Some("azure/login@v2".to_string()), + method: "oidc".to_string(), + required_secrets: vec![ + "AZURE_CLIENT_ID".to_string(), + "AZURE_TENANT_ID".to_string(), + "AZURE_SUBSCRIPTION_ID".to_string(), + ], + }, + registry: RegistryStep { + registry: Registry::Acr, + login_action: Some("azure/docker-login@v2".to_string()), + registry_url: "${{ secrets.ACR_LOGIN_SERVER }}".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "${{ 
secrets.ACR_LOGIN_SERVER }}/my-app:${{ github.sha }}".to_string(), + dockerfile: "Dockerfile".to_string(), + context: ".".to_string(), + push: true, + buildx: true, + build_args: vec![], + }, + migration: if with_migration { + Some(MigrationStep { + tool: MigrationTool::Prisma, + command: "npx prisma migrate deploy".to_string(), + via_ssh: false, + }) + } else { + None + }, + terraform: None, + deploy: DeployStep { + target: target.clone(), + strategy: "rolling".to_string(), + command: "azure/webapps-deploy@v3".to_string(), + args: vec![], + }, + health_check: HealthCheckStep { + url: "https://${{ secrets.AZURE_APP_NAME }}.azurewebsites.net/health".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "az webapp deployment slot swap".to_string(), + }, + notifications: None, + unresolved_tokens: vec![], + default_branch: "main".to_string(), + image_name: "my-app".to_string(), + } + } + + #[test] + fn header_contains_project_name() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("my-app")); + } + + #[test] + fn header_contains_branch() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("- main")); + } + + #[test] + fn header_has_workflow_dispatch() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("workflow_dispatch")); + } + + #[test] + fn header_has_oidc_permissions() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("id-token: write")); + } + + #[test] + fn contains_checkout() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("actions/checkout@v4")); + } + + #[test] + fn contains_azure_login() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("azure/login@v2")); + } 
+ + #[test] + fn contains_docker_buildx_setup() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("docker/setup-buildx-action@v3")); + } + + #[test] + fn contains_docker_build_push() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("docker/build-push-action@v6")); + } + + #[test] + fn contains_health_check() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("Health check")); + } + + #[test] + fn contains_rollback_comment() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("Rollback")); + assert!(yaml.contains("redeploy-previous")); + } + + #[test] + fn app_service_deploy_uses_webapps_action() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("azure/webapps-deploy@v3")); + } + + #[test] + fn aks_deploy_uses_k8s_deploy_action() { + let yaml = render(&sample_pipeline(DeployTarget::Aks, false)); + assert!(yaml.contains("azure/k8s-deploy@v5")); + } + + #[test] + fn aks_deploy_sets_context() { + let yaml = render(&sample_pipeline(DeployTarget::Aks, false)); + assert!(yaml.contains("azure/aks-set-context@v4")); + } + + #[test] + fn container_apps_deploy_uses_action() { + let yaml = render(&sample_pipeline(DeployTarget::ContainerApps, false)); + assert!(yaml.contains("azure/container-apps-deploy@v2")); + } + + #[test] + fn no_migration_when_absent() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(!yaml.contains("migration")); + } + + #[test] + fn migration_present_when_set() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, true)); + assert!(yaml.contains("prisma migrate deploy")); + } + + #[test] + fn migration_references_database_url() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, true)); + assert!(yaml.contains("DATABASE_URL")); + } + + #[test] + fn 
app_service_health_check_uses_curl() { + let yaml = render(&sample_pipeline(DeployTarget::AppService, false)); + assert!(yaml.contains("curl --fail")); + } + + #[test] + fn aks_health_check_uses_kubectl() { + let yaml = render(&sample_pipeline(DeployTarget::Aks, false)); + assert!(yaml.contains("kubectl rollout status")); + } + + #[test] + fn filename_is_deploy_azure() { + assert_eq!(workflow_filename(), "deploy-azure.yml"); + } +} diff --git a/src/generator/cd_generation/templates/gcp.rs b/src/generator/cd_generation/templates/gcp.rs new file mode 100644 index 00000000..a308bf7b --- /dev/null +++ b/src/generator/cd_generation/templates/gcp.rs @@ -0,0 +1,460 @@ +//! CD-19 — GCP CD Template Builder +//! +//! Assembles a complete `.github/workflows/deploy-gcp.yml` from a +//! `CdPipeline` struct. The pipeline order is: +//! +//! 1. Checkout +//! 2. GCP auth (Workload Identity Federation) +//! 3. Configure Docker for GAR +//! 4. Docker build + push (via Buildx) +//! 5. Database migration *(optional)* +//! 6. Deploy to target (Cloud Run / GKE) +//! 7. Health check +//! +//! Like the Azure template, all YAML is assembled directly from `CdPipeline` +//! schema types via string formatting. + +use crate::generator::cd_generation::{ + context::DeployTarget, + health_check::is_kubectl_health_check, + schema::CdPipeline, +}; + +/// Renders a complete GCP CD workflow YAML string. +pub fn render(pipeline: &CdPipeline) -> String { + let mut yaml = String::with_capacity(4096); + + yaml.push_str(&render_header(pipeline)); + yaml.push_str("jobs:\n"); + yaml.push_str(" deploy:\n"); + yaml.push_str(" runs-on: ubuntu-latest\n"); + yaml.push_str(" steps:\n"); + + // 1. Checkout + yaml.push_str(" - uses: actions/checkout@v4\n\n"); + + // 2. GCP auth (WIF) + yaml.push_str(&render_auth_step(pipeline)); + + // 3. Configure Docker for GAR + yaml.push_str(&render_gar_docker_auth()); + + // 4. Docker build + push + yaml.push_str(&render_docker_step(pipeline)); + + // 5. 
Migration (optional) + if let Some(ref migration) = pipeline.migration { + yaml.push_str(&render_migration_step(migration)); + } + + // 6. Deploy + yaml.push_str(&render_deploy_step(pipeline)); + + // 7. Health check + yaml.push_str(&render_health_check_step(pipeline)); + + // Rollback comment + yaml.push_str(&render_rollback_comment(pipeline)); + + yaml +} + +/// Returns the canonical output filename. +pub fn workflow_filename() -> &'static str { + "deploy-gcp.yml" +} + +// ── Private renderers ───────────────────────────────────────────────────────── + +fn render_header(pipeline: &CdPipeline) -> String { + format!( + "\ +# Auto-generated by sync-ctl — GCP CD pipeline for {project} +# Target: {target} +name: Deploy to GCP ({target}) + +on: + push: + branches: + - {branch} + workflow_dispatch: + +permissions: + id-token: write + contents: read + +env: + IMAGE_NAME: {image} + +", + project = pipeline.project_name, + target = pipeline.deploy_target, + branch = pipeline.default_branch, + image = pipeline.image_name, + ) +} + +fn render_auth_step(pipeline: &CdPipeline) -> String { + let action = pipeline + .auth + .action + .as_deref() + .unwrap_or("google-github-actions/auth@v2"); + + format!( + " - name: Authenticate to Google Cloud + id: auth + uses: {action} + with: + workload_identity_provider: ${{{{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}}} + service_account: ${{{{ secrets.GCP_SERVICE_ACCOUNT }}}} + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v2\n\n", + ) +} + +fn render_gar_docker_auth() -> String { + " - name: Configure Docker for Artifact Registry + run: gcloud auth configure-docker ${{ secrets.GAR_LOCATION }}-docker.pkg.dev --quiet\n\n" + .to_string() +} + +fn render_docker_step(pipeline: &CdPipeline) -> String { + format!( + " - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: {context} + file: {dockerfile} + push: true + 
tags: {image_tag} + cache-from: type=gha + cache-to: type=gha,mode=max\n\n", + context = pipeline.docker_build_push.context, + dockerfile = pipeline.docker_build_push.dockerfile, + image_tag = pipeline.docker_build_push.image_tag, + ) +} + +fn render_migration_step( + migration: &crate::generator::cd_generation::schema::MigrationStep, +) -> String { + if migration.via_ssh { + format!( + " - name: Run database migrations ({tool}) via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ secrets.SSH_HOST }}}} << 'MIGRATE_EOF' + cd /opt/app && {command} + MIGRATE_EOF + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } else { + format!( + " - name: Run database migrations ({tool}) + run: {command} + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } +} + +fn render_deploy_step(pipeline: &CdPipeline) -> String { + match pipeline.deploy_target { + DeployTarget::CloudRun => format!( + " - name: Deploy to Cloud Run + id: deploy + uses: google-github-actions/deploy-cloudrun@v2 + with: + service: ${{{{ secrets.CLOUD_RUN_SERVICE }}}} + region: ${{{{ secrets.GCP_REGION }}}} + image: {image_tag}\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + DeployTarget::Gke => format!( + " - name: Get GKE credentials + uses: google-github-actions/get-gke-credentials@v2 + with: + cluster_name: ${{{{ secrets.GKE_CLUSTER_NAME }}}} + location: ${{{{ secrets.GKE_LOCATION }}}} + + - name: Deploy to GKE + run: | + kubectl set image deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + ${{{{ secrets.K8S_DEPLOYMENT_NAME }}}}={image_tag} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} + kubectl rollout status deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout=300s\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + _ => format!( + " - name: Deploy ({target}) + run: 
echo 'Deploy step for {target} — customize this step' + env: + IMAGE_TAG: {image_tag}\n\n", + target = pipeline.deploy_target, + image_tag = pipeline.docker_build_push.image_tag, + ), + } +} + +fn render_health_check_step(pipeline: &CdPipeline) -> String { + if is_kubectl_health_check(&pipeline.deploy_target) { + let timeout = pipeline.health_check.retries * pipeline.health_check.interval_secs; + format!( + " - name: Health check — rollout status + run: | + kubectl rollout status deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout={timeout}s\n\n", + ) + } else if matches!(pipeline.deploy_target, DeployTarget::CloudRun) { + format!( + " - name: Health check + run: | + curl --fail \\ + --retry {retries} \\ + --retry-delay {interval} \\ + --retry-all-errors \\ + -o /dev/null -s -w '%{{http_code}}' \\ + ${{{{ steps.deploy.outputs.url }}}}/health\n\n", + retries = pipeline.health_check.retries, + interval = pipeline.health_check.interval_secs, + ) + } else { + format!( + " - name: Health check + run: | + curl --fail \\ + --retry {retries} \\ + --retry-delay {interval} \\ + --retry-all-errors \\ + -o /dev/null -s -w '%{{http_code}}' \\ + {url}\n\n", + retries = pipeline.health_check.retries, + interval = pipeline.health_check.interval_secs, + url = pipeline.health_check.url, + ) + } +} + +fn render_rollback_comment(pipeline: &CdPipeline) -> String { + format!( + "\ +# ── Rollback ────────────────────────────────────────────────── +# Strategy: {strategy} +# Command: {command} +", + strategy = pipeline.rollback_info.strategy, + command = pipeline.rollback_info.command_hint, + ) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::{ + context::{CdPlatform, DeployTarget, MigrationTool, Registry}, + schema::*, + }; + + fn sample_gcp_pipeline(target: DeployTarget, with_migration: bool) -> 
CdPipeline { + CdPipeline { + project_name: "my-api".to_string(), + platform: CdPlatform::Gcp, + deploy_target: target.clone(), + environments: vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: None, + requires_approval: true, + app_url: None, + namespace: None, + replicas: None, + }], + auth: AuthStep { + action: Some("google-github-actions/auth@v2".to_string()), + method: "workload-identity".to_string(), + required_secrets: vec![ + "GCP_WORKLOAD_IDENTITY_PROVIDER".to_string(), + "GCP_SERVICE_ACCOUNT".to_string(), + ], + }, + registry: RegistryStep { + registry: Registry::Gar, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "${{ secrets.GAR_LOCATION }}-docker.pkg.dev".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "${{ secrets.GAR_LOCATION }}-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/my-api:${{ github.sha }}".to_string(), + dockerfile: "Dockerfile".to_string(), + context: ".".to_string(), + push: true, + buildx: true, + build_args: vec![], + }, + migration: if with_migration { + Some(MigrationStep { + tool: MigrationTool::Alembic, + command: "alembic upgrade head".to_string(), + via_ssh: false, + }) + } else { + None + }, + terraform: None, + deploy: DeployStep { + target: target.clone(), + strategy: "rolling".to_string(), + command: "google-github-actions/deploy-cloudrun@v2".to_string(), + args: vec![], + }, + health_check: HealthCheckStep { + url: "${{ steps.deploy.outputs.url }}/health".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "traffic-shift".to_string(), + command_hint: "gcloud run services update-traffic --to-revisions=LATEST=0".to_string(), + }, + notifications: None, + unresolved_tokens: vec![], + default_branch: "main".to_string(), + image_name: "my-api".to_string(), + } + } + + // ── Header ──────────────────────────────────────────────────────── + + #[test] + fn header_contains_project_name() 
{ + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("my-api")); + } + + #[test] + fn header_has_oidc_permissions() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("id-token: write")); + } + + #[test] + fn header_has_workflow_dispatch() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("workflow_dispatch")); + } + + // ── Structure ───────────────────────────────────────────────────── + + #[test] + fn contains_checkout() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("actions/checkout@v4")); + } + + #[test] + fn contains_gcp_auth() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("google-github-actions/auth@v2")); + } + + #[test] + fn contains_setup_gcloud() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("google-github-actions/setup-gcloud@v2")); + } + + #[test] + fn contains_gar_docker_auth() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("gcloud auth configure-docker")); + } + + #[test] + fn contains_docker_buildx() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("docker/setup-buildx-action@v3")); + } + + #[test] + fn contains_docker_build_push() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("docker/build-push-action@v6")); + } + + // ── Cloud Run deploy ────────────────────────────────────────────── + + #[test] + fn cloud_run_deploy_uses_action() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("google-github-actions/deploy-cloudrun@v2")); + } + + #[test] + fn cloud_run_health_check_uses_step_output() { + let yaml = 
render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("steps.deploy.outputs.url")); + } + + // ── GKE deploy ──────────────────────────────────────────────────── + + #[test] + fn gke_deploy_uses_get_credentials() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::Gke, false)); + assert!(yaml.contains("google-github-actions/get-gke-credentials@v2")); + } + + #[test] + fn gke_deploy_uses_kubectl() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::Gke, false)); + assert!(yaml.contains("kubectl set image")); + } + + #[test] + fn gke_deploy_has_rollout_status() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::Gke, false)); + assert!(yaml.contains("kubectl rollout status")); + } + + // ── Migration ───────────────────────────────────────────────────── + + #[test] + fn no_migration_when_absent() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(!yaml.contains("migration")); + } + + #[test] + fn migration_present_when_set() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, true)); + assert!(yaml.contains("alembic upgrade head")); + } + + // ── Rollback ────────────────────────────────────────────────────── + + #[test] + fn rollback_comment_present() { + let yaml = render(&sample_gcp_pipeline(DeployTarget::CloudRun, false)); + assert!(yaml.contains("Rollback")); + assert!(yaml.contains("traffic-shift")); + } + + // ── Filename ────────────────────────────────────────────────────── + + #[test] + fn filename_is_deploy_gcp() { + assert_eq!(workflow_filename(), "deploy-gcp.yml"); + } +} diff --git a/src/generator/cd_generation/templates/hetzner.rs b/src/generator/cd_generation/templates/hetzner.rs new file mode 100644 index 00000000..3ac7ba27 --- /dev/null +++ b/src/generator/cd_generation/templates/hetzner.rs @@ -0,0 +1,489 @@ +//! CD-20 — Hetzner CD Template Builder +//! +//! Assembles a complete `.github/workflows/deploy-hetzner.yml` from a +//! `CdPipeline` struct. 
The pipeline order is: +//! +//! 1. Checkout +//! 2. GHCR login +//! 3. Docker build + push (via Buildx) +//! 4. SSH agent setup *(VPS / Coolify)* +//! 5. Database migration via SSH *(optional)* +//! 6. Deploy to target (VPS / HetznerK8s / Coolify) +//! 7. Health check +//! +//! Hetzner targets are unique because VPS and Coolify deploy over SSH, +//! while HetznerK8s uses a kubeconfig secret. The template builder +//! adapts the steps accordingly. + +use crate::generator::cd_generation::{ + context::DeployTarget, + health_check::is_kubectl_health_check, + schema::CdPipeline, +}; + +/// Renders a complete Hetzner CD workflow YAML string. +pub fn render(pipeline: &CdPipeline) -> String { + let mut yaml = String::with_capacity(4096); + + yaml.push_str(&render_header(pipeline)); + yaml.push_str("jobs:\n"); + yaml.push_str(" deploy:\n"); + yaml.push_str(" runs-on: ubuntu-latest\n"); + yaml.push_str(" steps:\n"); + + // 1. Checkout + yaml.push_str(" - uses: actions/checkout@v4\n\n"); + + // 2. GHCR login + yaml.push_str(&render_ghcr_login()); + + // 3. Docker build + push + yaml.push_str(&render_docker_step(pipeline)); + + // 4. SSH agent (for VPS and Coolify targets) + if needs_ssh(&pipeline.deploy_target) { + yaml.push_str(&render_ssh_agent()); + } + + // 4b. Kubeconfig (for HetznerK8s) + if matches!(pipeline.deploy_target, DeployTarget::HetznerK8s) { + yaml.push_str(&render_kubeconfig()); + } + + // 5. Migration (optional) + if let Some(ref migration) = pipeline.migration { + yaml.push_str(&render_migration_step(migration)); + } + + // 6. Deploy + yaml.push_str(&render_deploy_step(pipeline)); + + // 7. Health check + yaml.push_str(&render_health_check_step(pipeline)); + + // Rollback comment + yaml.push_str(&render_rollback_comment(pipeline)); + + yaml +} + +/// Returns the canonical output filename. +pub fn workflow_filename() -> &'static str { + "deploy-hetzner.yml" +} + +/// Returns `true` if the target requires SSH agent setup. 
+fn needs_ssh(target: &DeployTarget) -> bool { + matches!(target, DeployTarget::Vps | DeployTarget::Coolify) +} + +// ── Private renderers ───────────────────────────────────────────────────────── + +fn render_header(pipeline: &CdPipeline) -> String { + format!( + "\ +# Auto-generated by sync-ctl — Hetzner CD pipeline for {project} +# Target: {target} +name: Deploy to Hetzner ({target}) + +on: + push: + branches: + - {branch} + workflow_dispatch: + +permissions: + contents: read + packages: write + +env: + IMAGE_NAME: {image} + +", + project = pipeline.project_name, + target = pipeline.deploy_target, + branch = pipeline.default_branch, + image = pipeline.image_name, + ) +} + +fn render_ghcr_login() -> String { + " - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }}\n\n" + .to_string() +} + +fn render_docker_step(pipeline: &CdPipeline) -> String { + format!( + " - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: {context} + file: {dockerfile} + push: true + tags: {image_tag} + cache-from: type=gha + cache-to: type=gha,mode=max\n\n", + context = pipeline.docker_build_push.context, + dockerfile = pipeline.docker_build_push.dockerfile, + image_tag = pipeline.docker_build_push.image_tag, + ) +} + +fn render_ssh_agent() -> String { + " - name: Set up SSH agent + uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}\n\n" + .to_string() +} + +fn render_kubeconfig() -> String { + " - name: Set up kubeconfig + run: | + mkdir -p ~/.kube + echo \"${{ secrets.KUBECONFIG }}\" > ~/.kube/config + chmod 600 ~/.kube/config\n\n" + .to_string() +} + +fn render_migration_step( + migration: &crate::generator::cd_generation::schema::MigrationStep, +) -> String { + if migration.via_ssh { + format!( + " - name: Run 
database migrations ({tool}) via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ secrets.SSH_HOST }}}} << 'MIGRATE_EOF' + cd /opt/app && {command} + MIGRATE_EOF + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } else { + format!( + " - name: Run database migrations ({tool}) + run: {command} + env: + DATABASE_URL: ${{{{ secrets.DATABASE_URL }}}}\n\n", + tool = migration.tool, + command = migration.command, + ) + } +} + +fn render_deploy_step(pipeline: &CdPipeline) -> String { + match pipeline.deploy_target { + DeployTarget::Vps => format!( + " - name: Deploy to VPS via SSH + run: | + ssh ${{{{ secrets.SSH_USER }}}}@${{{{ secrets.SSH_HOST }}}} << 'DEPLOY_EOF' + docker pull {image_tag} + cd /opt/app && docker compose up -d + DEPLOY_EOF\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + DeployTarget::HetznerK8s => format!( + " - name: Deploy to Hetzner Kubernetes + run: | + kubectl set image deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + ${{{{ secrets.K8S_DEPLOYMENT_NAME }}}}={image_tag} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} + kubectl rollout status deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout=300s\n\n", + image_tag = pipeline.docker_build_push.image_tag, + ), + DeployTarget::Coolify => " - name: Deploy via Coolify webhook + run: | + curl -fsSL -X POST ${{ secrets.COOLIFY_WEBHOOK_URL }}\n\n" + .to_string(), + _ => format!( + " - name: Deploy ({target}) + run: echo 'Deploy step for {target} — customize this step' + env: + IMAGE_TAG: {image_tag}\n\n", + target = pipeline.deploy_target, + image_tag = pipeline.docker_build_push.image_tag, + ), + } +} + +fn render_health_check_step(pipeline: &CdPipeline) -> String { + if is_kubectl_health_check(&pipeline.deploy_target) { + let timeout = pipeline.health_check.retries * pipeline.health_check.interval_secs; + format!( + " - name: Health check — 
rollout status + run: | + kubectl rollout status deployment/${{{{ secrets.K8S_DEPLOYMENT_NAME }}}} \\ + --namespace=${{{{ secrets.K8S_NAMESPACE }}}} \\ + --timeout={timeout}s\n\n", + ) + } else { + format!( + " - name: Health check + run: | + curl --fail \\ + --retry {retries} \\ + --retry-delay {interval} \\ + --retry-all-errors \\ + -o /dev/null -s -w '%{{http_code}}' \\ + {url}\n\n", + retries = pipeline.health_check.retries, + interval = pipeline.health_check.interval_secs, + url = pipeline.health_check.url, + ) + } +} + +fn render_rollback_comment(pipeline: &CdPipeline) -> String { + format!( + "\ +# ── Rollback ────────────────────────────────────────────────── +# Strategy: {strategy} +# Command: {command} +", + strategy = pipeline.rollback_info.strategy, + command = pipeline.rollback_info.command_hint, + ) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::cd_generation::{ + context::{CdPlatform, DeployTarget, MigrationTool, Registry}, + schema::*, + }; + + fn sample_hetzner_pipeline(target: DeployTarget, with_migration: bool) -> CdPipeline { + let via_ssh = matches!(target, DeployTarget::Vps); + CdPipeline { + project_name: "my-svc".to_string(), + platform: CdPlatform::Hetzner, + deploy_target: target.clone(), + environments: vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: None, + requires_approval: false, + app_url: None, + namespace: None, + replicas: None, + }], + auth: AuthStep { + action: None, + method: "ssh".to_string(), + required_secrets: vec!["SSH_PRIVATE_KEY".to_string()], + }, + registry: RegistryStep { + registry: Registry::Ghcr, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "ghcr.io".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "ghcr.io/${{ github.repository_owner }}/my-svc:${{ github.sha }}" + .to_string(), + dockerfile: "Dockerfile".to_string(), + 
context: ".".to_string(), + push: true, + buildx: true, + build_args: vec![], + }, + migration: if with_migration { + Some(MigrationStep { + tool: MigrationTool::Diesel, + command: "diesel migration run".to_string(), + via_ssh, + }) + } else { + None + }, + terraform: None, + deploy: DeployStep { + target: target.clone(), + strategy: "recreate".to_string(), + command: "ssh".to_string(), + args: vec![], + }, + health_check: HealthCheckStep { + url: "https://${{ secrets.SSH_HOST }}/health".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "manual".to_string(), + command_hint: "ssh user@host 'docker compose up -d --force-recreate'".to_string(), + }, + notifications: None, + unresolved_tokens: vec![], + default_branch: "main".to_string(), + image_name: "my-svc".to_string(), + } + } + + // ── Header ──────────────────────────────────────────────────────── + + #[test] + fn header_contains_project_name() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("my-svc")); + } + + #[test] + fn header_has_packages_write_permission() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("packages: write")); + } + + #[test] + fn header_has_workflow_dispatch() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("workflow_dispatch")); + } + + // ── Structure ───────────────────────────────────────────────────── + + #[test] + fn contains_checkout() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("actions/checkout@v4")); + } + + #[test] + fn contains_ghcr_login() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("docker/login-action@v3")); + assert!(yaml.contains("ghcr.io")); + } + + #[test] + fn contains_docker_buildx() { + let yaml = 
render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("docker/setup-buildx-action@v3")); + } + + // ── SSH agent ───────────────────────────────────────────────────── + + #[test] + fn vps_has_ssh_agent() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("webfactory/ssh-agent@v0.9.0")); + } + + #[test] + fn coolify_has_ssh_agent() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Coolify, false)); + assert!(yaml.contains("webfactory/ssh-agent@v0.9.0")); + } + + #[test] + fn k8s_has_no_ssh_agent() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::HetznerK8s, false)); + assert!(!yaml.contains("ssh-agent")); + } + + #[test] + fn k8s_has_kubeconfig() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::HetznerK8s, false)); + assert!(yaml.contains("KUBECONFIG")); + } + + // ── VPS deploy ──────────────────────────────────────────────────── + + #[test] + fn vps_deploy_uses_ssh() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("ssh ${{ secrets.SSH_USER }}")); + } + + #[test] + fn vps_deploy_pulls_image() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("docker pull")); + } + + #[test] + fn vps_deploy_composes_up() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("docker compose up -d")); + } + + // ── HetznerK8s deploy ───────────────────────────────────────────── + + #[test] + fn k8s_deploy_uses_kubectl() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::HetznerK8s, false)); + assert!(yaml.contains("kubectl set image")); + } + + #[test] + fn k8s_deploy_has_rollout_status() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::HetznerK8s, false)); + assert!(yaml.contains("kubectl rollout status")); + } + + // ── Coolify deploy ──────────────────────────────────────────────── + + #[test] + fn 
coolify_deploy_uses_webhook() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Coolify, false)); + assert!(yaml.contains("COOLIFY_WEBHOOK_URL")); + } + + // ── Migration ───────────────────────────────────────────────────── + + #[test] + fn no_migration_when_absent() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(!yaml.contains("migration")); + } + + #[test] + fn migration_via_ssh_for_vps() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, true)); + assert!(yaml.contains("diesel migration run")); + assert!(yaml.contains("via SSH")); + } + + // ── Health check ────────────────────────────────────────────────── + + #[test] + fn vps_health_check_uses_curl() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("curl --fail")); + } + + #[test] + fn k8s_health_check_uses_kubectl() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::HetznerK8s, false)); + assert!(yaml.contains("kubectl rollout status")); + } + + // ── Rollback ────────────────────────────────────────────────────── + + #[test] + fn rollback_comment_present() { + let yaml = render(&sample_hetzner_pipeline(DeployTarget::Vps, false)); + assert!(yaml.contains("Rollback")); + assert!(yaml.contains("manual")); + } + + // ── Filename ────────────────────────────────────────────────────── + + #[test] + fn filename_is_deploy_hetzner() { + assert_eq!(workflow_filename(), "deploy-hetzner.yml"); + } +} diff --git a/src/generator/cd_generation/templates/mod.rs b/src/generator/cd_generation/templates/mod.rs new file mode 100644 index 00000000..ab259f2a --- /dev/null +++ b/src/generator/cd_generation/templates/mod.rs @@ -0,0 +1,14 @@ +//! CD Template Builders +//! +//! Each submodule assembles a final GitHub Actions workflow YAML file for +//! a specific cloud platform by stitching together the step snippets +//! produced by the `auth_*`, `registry`, `deploy_*`, `migration`, and +//! 
`health_check` modules. +//! +//! - `azure` — `.github/workflows/deploy-azure.yml` (CD-18) +//! - `gcp` — `.github/workflows/deploy-gcp.yml` (CD-19) +//! - `hetzner` — `.github/workflows/deploy-hetzner.yml` (CD-20) + +pub mod azure; +pub mod gcp; +pub mod hetzner; diff --git a/src/generator/cd_generation/terraform_step.rs b/src/generator/cd_generation/terraform_step.rs new file mode 100644 index 00000000..a7de67a4 --- /dev/null +++ b/src/generator/cd_generation/terraform_step.rs @@ -0,0 +1,216 @@ +//! CD-16 — Terraform Integration Step (Optional) +//! +//! Generates Terraform init/plan/apply steps gated by `CdContext.has_terraform`. +//! Injects `TF_VAR_image_tag` so Terraform can reference the deployed image. +//! +//! ```yaml +//! - name: Terraform Init +//! uses: hashicorp/setup-terraform@v3 +//! +//! - name: Terraform Plan +//! run: terraform plan -input=false +//! env: +//! TF_VAR_image_tag: ${{ env.IMAGE_TAG }} +//! +//! - name: Terraform Apply +//! if: github.ref == 'refs/heads/main' +//! run: terraform apply -auto-approve -input=false +//! ``` + +use super::schema::TerraformStep; + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates a `TerraformStep` from the context. +/// +/// `terraform_dir` is the working directory (e.g. `"terraform"`, `"infra"`). +/// `auto_approve` should be `false` for production environments. +pub fn generate_terraform_step( + terraform_dir: &str, + auto_approve: bool, +) -> TerraformStep { + TerraformStep { + working_directory: terraform_dir.to_string(), + version: "{{TERRAFORM_VERSION}}".to_string(), + backend_config: vec![], + auto_approve, + } +} + +/// Renders the Terraform steps as a GitHub Actions YAML snippet. 
pub fn render_terraform_yaml(step: &TerraformStep, default_branch: &str) -> String {
    // `{{TERRAFORM_VERSION}}` is the unresolved placeholder; only a pinned
    // version gets an explicit `with:` block on setup-terraform.
    let version_block = if step.version == "{{TERRAFORM_VERSION}}" {
        String::new()
    } else {
        format!(" with:\n terraform_version: {}\n", step.version)
    };

    // 1. Setup Terraform.
    let setup = format!(
        " - name: Set up Terraform\n uses: hashicorp/setup-terraform@v3\n{version_block}\n"
    );

    // 2. Init — backend-config flags are appended to the init command line.
    let mut init_cmd = String::from("terraform init -input=false");
    for flag in &step.backend_config {
        init_cmd.push(' ');
        init_cmd.push_str(flag);
    }
    let init = format!(
        " - name: Terraform Init\n working-directory: {dir}\n run: {init_cmd}\n\n",
        dir = step.working_directory,
    );

    // 3. Plan — written to `tfplan` so apply runs exactly the reviewed plan.
    //    `TF_VAR_image_tag` lets the Terraform config reference the image.
    let plan = format!(
        " - name: Terraform Plan\n working-directory: {dir}\n run: terraform plan -input=false -out=tfplan\n env:\n TF_VAR_image_tag: ${{{{ env.IMAGE_TAG }}}}\n\n",
        dir = step.working_directory,
    );

    // 4. Apply — gated to the default branch; `-auto-approve` only when the
    //    caller opted in (it should stay off for production environments).
    let apply_cmd = if step.auto_approve {
        "terraform apply -auto-approve -input=false tfplan"
    } else {
        "terraform apply -input=false tfplan"
    };
    let apply = format!(
        " - name: Terraform Apply\n if: github.ref == 'refs/heads/{default_branch}'\n working-directory: {dir}\n run: {apply_cmd}\n env:\n TF_VAR_image_tag: ${{{{ env.IMAGE_TAG }}}}\n",
        dir = step.working_directory,
    );

    [setup, init, plan, apply].concat()
}

// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn generate_step_sets_working_directory() {
        let step = generate_terraform_step("terraform", true);
        assert_eq!(step.working_directory, "terraform");
    }

    #[test]
    fn generate_step_defaults_to_placeholder_version() {
        let step =
generate_terraform_step("infra", false); + assert_eq!(step.version, "{{TERRAFORM_VERSION}}"); + } + + #[test] + fn generate_step_auto_approve_flag() { + let step = generate_terraform_step("tf", true); + assert!(step.auto_approve); + let step2 = generate_terraform_step("tf", false); + assert!(!step2.auto_approve); + } + + #[test] + fn yaml_contains_setup_terraform() { + let step = generate_terraform_step("terraform", true); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("hashicorp/setup-terraform@v3")); + } + + #[test] + fn yaml_contains_terraform_init() { + let step = generate_terraform_step("infra", false); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("Terraform Init")); + assert!(yaml.contains("terraform init -input=false")); + } + + #[test] + fn yaml_contains_terraform_plan() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("Terraform Plan")); + assert!(yaml.contains("terraform plan")); + } + + #[test] + fn yaml_injects_tf_var_image_tag() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("TF_VAR_image_tag")); + } + + #[test] + fn yaml_apply_gated_by_branch() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("if: github.ref == 'refs/heads/main'")); + } + + #[test] + fn yaml_apply_auto_approve_when_set() { + let step = generate_terraform_step("terraform", true); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("-auto-approve")); + } + + #[test] + fn yaml_apply_no_auto_approve_when_unset() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "main"); + // Should have "terraform apply -input=false tfplan" without -auto-approve + let apply_line = yaml + .lines() + .find(|l| 
l.contains("terraform apply")) + .unwrap(); + assert!(!apply_line.contains("-auto-approve")); + } + + #[test] + fn yaml_uses_working_directory() { + let step = generate_terraform_step("infra/prod", false); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("working-directory: infra/prod")); + } + + #[test] + fn yaml_custom_branch() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "master"); + assert!(yaml.contains("refs/heads/master")); + } + + #[test] + fn yaml_backend_config() { + let mut step = generate_terraform_step("terraform", false); + step.backend_config = vec!["-backend-config=env/prod.hcl".to_string()]; + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("-backend-config=env/prod.hcl")); + } + + #[test] + fn yaml_no_version_with_when_placeholder() { + let step = generate_terraform_step("terraform", false); + let yaml = render_terraform_yaml(&step, "main"); + // When version is placeholder, terraform_version with: block should not be emitted + assert!(!yaml.contains("terraform_version:")); + } + + #[test] + fn yaml_version_with_when_set() { + let mut step = generate_terraform_step("terraform", false); + step.version = "1.7.0".to_string(); + let yaml = render_terraform_yaml(&step, "main"); + assert!(yaml.contains("terraform_version: 1.7.0")); + } +} diff --git a/src/generator/cd_generation/token_resolver.rs b/src/generator/cd_generation/token_resolver.rs new file mode 100644 index 00000000..e2a942b2 --- /dev/null +++ b/src/generator/cd_generation/token_resolver.rs @@ -0,0 +1,511 @@ +//! CD Token Resolution Engine — adapted from CI-15 for CD tokens. +//! +//! Two-pass strategy identical to the CI resolver: +//! 1. **Deterministic pass** — replaces `{{TOKEN_NAME}}` in String fields +//! when the value can be derived unambiguously from `CdContext`. +//! 2. **Placeholder pass** — any remaining `{{TOKEN_NAME}}` pattern becomes +//! 
an `UnresolvedToken` in `pipeline.unresolved_tokens`. + +use std::collections::HashMap; + +use regex::Regex; + +use super::context::{CdContext, Registry}; +use super::schema::{CdPipeline, UnresolvedToken}; + +/// A map from `TOKEN_NAME` to its resolved value. +pub type ResolvedTokenMap = HashMap; + +/// Runs the two-pass resolution engine on `pipeline` in place. +/// +/// Returns the map of deterministically resolved tokens; callers pass this +/// to the manifest writer. +pub fn resolve_tokens(ctx: &CdContext, pipeline: &mut CdPipeline) -> ResolvedTokenMap { + let resolved = build_resolved_map(ctx); + let re = Regex::new(r"\{\{([A-Z][A-Z0-9_]*)\}\}").expect("static regex is valid"); + apply_to_pipeline(pipeline, &resolved, &re); + resolved +} + +// ── Private helpers ─────────────────────────────────────────────────────────── + +/// Builds the deterministic token map from `CdContext`. +fn build_resolved_map(ctx: &CdContext) -> ResolvedTokenMap { + let mut map = HashMap::new(); + + map.insert("PROJECT_NAME".to_string(), ctx.project_name.clone()); + map.insert("IMAGE_NAME".to_string(), ctx.image_name.clone()); + map.insert("DEFAULT_BRANCH".to_string(), ctx.default_branch.clone()); + + // Registry URL is deterministic for known registries. + match &ctx.registry { + Registry::Ghcr => { + map.insert("REGISTRY_URL".to_string(), "ghcr.io".to_string()); + } + Registry::Acr | Registry::Gar | Registry::Custom(_) => { + // These remain as placeholders — user must supply. + } + } + + // Health check path if detected. + if let Some(hp) = &ctx.health_check_path { + map.insert("HEALTH_CHECK_PATH".to_string(), hp.clone()); + } + + // Terraform directory if detected. + if let Some(tf_dir) = &ctx.terraform_dir { + map.insert( + "TERRAFORM_DIR".to_string(), + tf_dir.to_string_lossy().into_owned(), + ); + } + + // K8s manifest directory if detected. 
+ if let Some(k8s_dir) = &ctx.k8s_manifest_dir { + map.insert( + "K8S_MANIFEST_DIR".to_string(), + k8s_dir.to_string_lossy().into_owned(), + ); + } + + // Helm chart directory if detected. + if let Some(helm_dir) = &ctx.helm_chart_dir { + map.insert( + "HELM_CHART_DIR".to_string(), + helm_dir.to_string_lossy().into_owned(), + ); + } + + map +} + +/// Visits every `String` field in the `CdPipeline` that may carry a `{{TOKEN}}` +/// and applies both resolution passes. +fn apply_to_pipeline(pipeline: &mut CdPipeline, resolved: &ResolvedTokenMap, re: &Regex) { + let acc = &mut pipeline.unresolved_tokens; + + // Top-level fields. + resolve_str(&mut pipeline.project_name, resolved, re, acc); + resolve_str(&mut pipeline.image_name, resolved, re, acc); + resolve_str(&mut pipeline.default_branch, resolved, re, acc); + + // Auth step. + if let Some(action) = &mut pipeline.auth.action { + resolve_str(action, resolved, re, acc); + } + resolve_str(&mut pipeline.auth.method, resolved, re, acc); + for s in &mut pipeline.auth.required_secrets { + resolve_str(s, resolved, re, acc); + } + + // Registry step. + resolve_str(&mut pipeline.registry.registry_url, resolved, re, acc); + + // Docker build + push step. + resolve_str(&mut pipeline.docker_build_push.image_tag, resolved, re, acc); + resolve_str(&mut pipeline.docker_build_push.context, resolved, re, acc); + resolve_str(&mut pipeline.docker_build_push.dockerfile, resolved, re, acc); + for arg in &mut pipeline.docker_build_push.build_args { + resolve_str(arg, resolved, re, acc); + } + + // Migration step. + if let Some(mig) = &mut pipeline.migration { + resolve_str(&mut mig.command, resolved, re, acc); + } + + // Terraform step. + if let Some(tf) = &mut pipeline.terraform { + resolve_str(&mut tf.working_directory, resolved, re, acc); + resolve_str(&mut tf.version, resolved, re, acc); + for bc in &mut tf.backend_config { + resolve_str(bc, resolved, re, acc); + } + } + + // Deploy step. 
    resolve_str(&mut pipeline.deploy.command, resolved, re, acc);
    for arg in &mut pipeline.deploy.args {
        resolve_str(arg, resolved, re, acc);
    }

    // Health check step.
    resolve_str(&mut pipeline.health_check.url, resolved, re, acc);

    // Rollback info.
    resolve_str(&mut pipeline.rollback_info.command_hint, resolved, re, acc);

    // Notification step.
    if let Some(notify) = &mut pipeline.notifications {
        resolve_str(&mut notify.webhook_secret, resolved, re, acc);
    }

    // Environment configs — only the optional string fields can carry tokens.
    for env in &mut pipeline.environments {
        if let Some(url) = &mut env.app_url {
            resolve_str(url, resolved, re, acc);
        }
        if let Some(ns) = &mut env.namespace {
            resolve_str(ns, resolved, re, acc);
        }
    }
}

/// Resolves known tokens and collects unknown ones from a single `String` field.
///
/// Pass 1 substitutes every `{{NAME}}` whose value is present in `resolved`.
/// Pass 2 scans whatever placeholders remain and records each distinct name
/// once in `acc` as an `UnresolvedToken` for the user to supply later.
fn resolve_str(
    field: &mut String,
    resolved: &ResolvedTokenMap,
    re: &Regex,
    acc: &mut Vec<UnresolvedToken>,
) {
    // Pass 1: replace deterministic tokens.
    // HashMap iteration order is arbitrary, but token names are distinct and
    // replacement is literal text, so the result does not depend on the order.
    for (name, value) in resolved {
        let placeholder = format!("{{{{{}}}}}", name);
        if field.contains(&placeholder) {
            *field = field.replace(&placeholder, value);
        }
    }

    // Pass 2: collect remaining placeholders as unresolved.
    // Snapshot first: the regex must borrow the string immutably while we scan.
    let snapshot = field.clone();
    for cap in re.captures_iter(&snapshot) {
        let name = cap[1].to_string();
        // Deduplicate across the whole pipeline: a token referenced from many
        // fields is reported exactly once.
        if !acc.iter().any(|u| u.name == name) {
            acc.push(UnresolvedToken::new(
                &name,
                "Provide a value for this token",
                "string",
            ));
        }
    }
}

// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;
    use super::super::context::*;
    use super::super::schema::*;
    use crate::analyzer::{AnalysisMetadata, ProjectAnalysis};
    use std::path::PathBuf;

    /// Build a minimal `ProjectAnalysis` for testing.
    /// The resolver never inspects the analysis itself — only `CdContext`
    /// fields — so every collection is empty and every score is zero; only
    /// the struct's shape matters here.
    #[allow(deprecated)]
    fn stub_analysis() -> ProjectAnalysis {
        ProjectAnalysis {
            project_root: PathBuf::from("/tmp/test-app"),
            languages: vec![],
            technologies: vec![],
            frameworks: vec![],
            dependencies: Default::default(),
            entry_points: vec![],
            ports: vec![],
            health_endpoints: vec![],
            environment_variables: vec![],
            project_type: crate::analyzer::ProjectType::Unknown,
            build_scripts: vec![],
            services: vec![],
            architecture_type: crate::analyzer::ArchitectureType::Monolithic,
            docker_analysis: None,
            infrastructure: None,
            analysis_metadata: AnalysisMetadata {
                timestamp: String::new(),
                analyzer_version: String::new(),
                analysis_duration_ms: 0,
                files_analyzed: 0,
                confidence_score: 0.0,
            },
        }
    }

    /// Build a minimal `CdContext` for testing.
    ///
    /// GHCR + Cloud Run with a detected `/health` endpoint: GHCR makes
    /// `REGISTRY_URL` deterministic and `/health` makes `HEALTH_CHECK_PATH`
    /// deterministic, while everything optional (terraform, k8s, helm,
    /// migrations) is absent so their tokens stay unresolved.
    fn make_test_context() -> CdContext {
        CdContext {
            analysis: stub_analysis(),
            project_name: "test-app".to_string(),
            platform: CdPlatform::Gcp,
            deploy_target: DeployTarget::CloudRun,
            environments: vec![Environment {
                name: "production".to_string(),
                requires_approval: false,
            }],
            registry: Registry::Ghcr,
            image_name: "test-app".to_string(),
            has_terraform: false,
            terraform_dir: None,
            has_k8s_manifests: false,
            k8s_manifest_dir: None,
            has_helm_chart: false,
            helm_chart_dir: None,
            migration_tool: None,
            migration_command_override: None,
            health_check_path: Some("/health".to_string()),
            default_branch: "main".to_string(),
            has_dockerfile: true,
        }
    }

    /// Build a minimal `CdPipeline` for testing, with placeholders.
+ fn make_test_pipeline() -> CdPipeline { + CdPipeline { + project_name: "{{PROJECT_NAME}}".to_string(), + platform: CdPlatform::Gcp, + deploy_target: DeployTarget::CloudRun, + environments: vec![EnvironmentConfig { + name: "production".to_string(), + branch_filter: Some("main".to_string()), + requires_approval: false, + app_url: Some("https://{{APP_URL}}".to_string()), + namespace: None, + replicas: None, + }], + auth: AuthStep { + action: Some("google-github-actions/auth@v2".to_string()), + method: "workload-identity".to_string(), + required_secrets: vec![ + "GCP_WORKLOAD_IDENTITY_PROVIDER".to_string(), + "GCP_SERVICE_ACCOUNT".to_string(), + ], + }, + registry: RegistryStep { + registry: Registry::Ghcr, + login_action: Some("docker/login-action@v3".to_string()), + registry_url: "{{REGISTRY_URL}}".to_string(), + }, + docker_build_push: DockerBuildPushStep { + image_tag: "{{REGISTRY_URL}}/{{IMAGE_NAME}}:sha".to_string(), + context: ".".to_string(), + dockerfile: "Dockerfile".to_string(), + push: true, + buildx: false, + build_args: vec![], + }, + migration: None, + terraform: None, + deploy: DeployStep { + strategy: "rolling".to_string(), + command: "gcloud run deploy {{PROJECT_NAME}}".to_string(), + args: vec!["--region={{GCP_REGION}}".to_string()], + target: DeployTarget::CloudRun, + }, + health_check: HealthCheckStep { + url: "https://{{APP_URL}}/{{HEALTH_CHECK_PATH}}".to_string(), + retries: 5, + interval_secs: 10, + expected_status: 200, + }, + rollback_info: RollbackInfo { + strategy: "redeploy-previous".to_string(), + command_hint: "gcloud run services update-traffic --to-revisions=LATEST=100" + .to_string(), + }, + notifications: None, + unresolved_tokens: vec![], + default_branch: "{{DEFAULT_BRANCH}}".to_string(), + image_name: "{{IMAGE_NAME}}".to_string(), + } + } + + // ── Deterministic pass tests ────────────────────────────────────────────── + + #[test] + fn project_name_token_resolved() { + let ctx = make_test_context(); + let mut pipeline = 
make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.project_name, "test-app"); + } + + #[test] + fn image_name_token_resolved() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.image_name, "test-app"); + } + + #[test] + fn default_branch_token_resolved() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.default_branch, "main"); + } + + #[test] + fn registry_url_resolved_for_ghcr() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.registry.registry_url, "ghcr.io"); + assert_eq!( + pipeline.docker_build_push.image_tag, + "ghcr.io/test-app:sha" + ); + } + + #[test] + fn health_check_path_resolved() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + // The health check URL should have HEALTH_CHECK_PATH replaced, + // but APP_URL remains unresolved. 
+ assert!(pipeline.health_check.url.contains("/health")); + } + + #[test] + fn deploy_command_resolved() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.deploy.command, "gcloud run deploy test-app"); + } + + // ── Placeholder pass tests ──────────────────────────────────────────────── + + #[test] + fn unknown_token_becomes_unresolved() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + let names: Vec<&str> = pipeline + .unresolved_tokens + .iter() + .map(|u| u.name.as_str()) + .collect(); + assert!(names.contains(&"GCP_REGION"), "GCP_REGION should be unresolved"); + assert!(names.contains(&"APP_URL"), "APP_URL should be unresolved"); + } + + #[test] + fn duplicate_tokens_deduplicated() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + let app_url_count = pipeline + .unresolved_tokens + .iter() + .filter(|u| u.name == "APP_URL") + .count(); + assert_eq!(app_url_count, 1, "APP_URL should appear exactly once"); + } + + #[test] + fn acr_registry_url_stays_unresolved() { + let mut ctx = make_test_context(); + ctx.registry = Registry::Acr; + let mut pipeline = make_test_pipeline(); + pipeline.registry.registry_url = "{{ACR_LOGIN_SERVER}}".to_string(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.registry.registry_url, "{{ACR_LOGIN_SERVER}}"); + let names: Vec<&str> = pipeline + .unresolved_tokens + .iter() + .map(|u| u.name.as_str()) + .collect(); + assert!(names.contains(&"ACR_LOGIN_SERVER")); + } + + #[test] + fn terraform_dir_resolved_when_present() { + let mut ctx = make_test_context(); + ctx.has_terraform = true; + ctx.terraform_dir = Some(PathBuf::from("infra/terraform")); + + let mut pipeline = make_test_pipeline(); + pipeline.terraform = Some(TerraformStep { + working_directory: 
"{{TERRAFORM_DIR}}".to_string(), + version: "{{TERRAFORM_VERSION}}".to_string(), + backend_config: vec![], + auto_approve: false, + }); + + let resolved = resolve_tokens(&ctx, &mut pipeline); + + assert_eq!( + pipeline.terraform.as_ref().unwrap().working_directory, + "infra/terraform" + ); + assert!(resolved.contains_key("TERRAFORM_DIR")); + // TERRAFORM_VERSION is still unresolved. + let names: Vec<&str> = pipeline + .unresolved_tokens + .iter() + .map(|u| u.name.as_str()) + .collect(); + assert!(names.contains(&"TERRAFORM_VERSION")); + } + + #[test] + fn resolved_map_contains_expected_keys() { + let ctx = make_test_context(); + let mut pipeline = make_test_pipeline(); + + let resolved = resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(resolved.get("PROJECT_NAME").map(|s| s.as_str()), Some("test-app")); + assert_eq!(resolved.get("IMAGE_NAME").map(|s| s.as_str()), Some("test-app")); + assert_eq!(resolved.get("DEFAULT_BRANCH").map(|s| s.as_str()), Some("main")); + assert_eq!(resolved.get("REGISTRY_URL").map(|s| s.as_str()), Some("ghcr.io")); + assert_eq!(resolved.get("HEALTH_CHECK_PATH").map(|s| s.as_str()), Some("/health")); + } + + #[test] + fn migration_command_tokens_resolved() { + let mut ctx = make_test_context(); + ctx.project_name = "mydb".to_string(); + let mut pipeline = make_test_pipeline(); + pipeline.migration = Some(MigrationStep { + tool: MigrationTool::Prisma, + command: "npx prisma migrate deploy --schema={{PROJECT_NAME}}/prisma/schema.prisma" + .to_string(), + via_ssh: false, + }); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!( + pipeline.migration.as_ref().unwrap().command, + "npx prisma migrate deploy --schema=mydb/prisma/schema.prisma" + ); + } + + #[test] + fn environment_app_url_resolved_when_deterministic() { + let mut ctx = make_test_context(); + // Make REGISTRY_URL deterministic (GHCR). + ctx.registry = Registry::Ghcr; + let mut pipeline = make_test_pipeline(); + // APP_URL is not deterministic — should stay unresolved. 
+ pipeline.environments[0].app_url = Some("https://{{APP_URL}}/home".to_string()); + + resolve_tokens(&ctx, &mut pipeline); + + // APP_URL stays as placeholder. + assert!(pipeline.environments[0] + .app_url + .as_ref() + .unwrap() + .contains("{{APP_URL}}")); + } +} diff --git a/src/generator/cd_generation/versioning.rs b/src/generator/cd_generation/versioning.rs new file mode 100644 index 00000000..557b4ed9 --- /dev/null +++ b/src/generator/cd_generation/versioning.rs @@ -0,0 +1,174 @@ +//! CD-15 — Artifact Versioning & Image Tag Strategy +//! +//! Generates a consistent image tagging scheme across CI and CD: +//! +//! ```yaml +//! env: +//! IMAGE_TAG: ${{ github.repository }}:${{ github.sha }} +//! IMAGE_TAG_LATEST: ${{ github.repository }}:latest +//! IMAGE_TAG_VERSION: ${{ github.repository }}:${{ github.ref_name }} +//! ``` +//! +//! Tag matrix: +//! - Every push to main → SHA tag + `latest` +//! - Tag push (`v1.2.3`) → version tag + `latest` +//! - PR → SHA tag only (no `latest`) + +// ── Types ───────────────────────────────────────────────────────────────────── + +/// Image tag strategy for the CD pipeline. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TagStrategy { + /// Always use `` as the primary tag. + Sha, + /// Use semver when a tag is pushed, SHA otherwise. + SemverWithShaFallback, +} + +/// Represents the set of image tags to apply. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ImageTags { + /// Primary tag (always present), e.g. `ghcr.io/org/app:${{ github.sha }}`. + pub sha_tag: String, + /// Latest tag (only on default branch push), e.g. `ghcr.io/org/app:latest`. + pub latest_tag: String, + /// Version tag (only on tag push), e.g. `ghcr.io/org/app:${{ github.ref_name }}`. + pub version_tag: String, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Computes the image tags for the given registry URL and image name. 
+pub fn compute_image_tags(registry_url: &str, image_name: &str) -> ImageTags { + let base = if registry_url.is_empty() { + image_name.to_string() + } else { + format!("{registry_url}/{image_name}") + }; + + ImageTags { + sha_tag: format!("{base}:${{{{ github.sha }}}}"), + latest_tag: format!("{base}:latest"), + version_tag: format!("{base}:${{{{ github.ref_name }}}}"), + } +} + +/// Renders the `env:` block with image tag environment variables. +/// +/// These are placed at the top level of the workflow YAML so all jobs +/// can reference `${{ env.IMAGE_TAG }}` etc. +pub fn render_versioning_env_block(tags: &ImageTags) -> String { + format!( + "\ +env: + IMAGE_TAG: {sha} + IMAGE_TAG_LATEST: {latest} + IMAGE_TAG_VERSION: {version} +", + sha = tags.sha_tag, + latest = tags.latest_tag, + version = tags.version_tag, + ) +} + +/// Renders a GitHub Actions step that computes the effective tag list +/// based on the event context (push to main, tag push, PR). +pub fn render_tag_resolution_step() -> String { + "\ + - name: Compute image tags + id: tags + run: | + TAGS=\"${{ env.IMAGE_TAG }}\" + if [[ \"${{ github.ref }}\" == refs/heads/${{ github.event.repository.default_branch }} ]]; then + TAGS=\"${TAGS},${{ env.IMAGE_TAG_LATEST }}\" + fi + if [[ \"${{ github.ref }}\" == refs/tags/v* ]]; then + TAGS=\"${TAGS},${{ env.IMAGE_TAG_VERSION }},${{ env.IMAGE_TAG_LATEST }}\" + fi + echo \"tags=${TAGS}\" >> \"$GITHUB_OUTPUT\" +" + .to_string() +} + +/// Returns the expression to reference the computed tags in a build step. 
+pub fn tags_output_expression() -> &'static str { + "${{ steps.tags.outputs.tags }}" +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sha_tag_contains_github_sha() { + let tags = compute_image_tags("ghcr.io", "my-app"); + assert!(tags.sha_tag.contains("github.sha")); + assert!(tags.sha_tag.starts_with("ghcr.io/my-app:")); + } + + #[test] + fn latest_tag_is_literal_latest() { + let tags = compute_image_tags("ghcr.io", "my-app"); + assert!(tags.latest_tag.ends_with(":latest")); + } + + #[test] + fn version_tag_contains_ref_name() { + let tags = compute_image_tags("ghcr.io", "my-app"); + assert!(tags.version_tag.contains("github.ref_name")); + } + + #[test] + fn empty_registry_url_uses_image_name_only() { + let tags = compute_image_tags("", "my-app"); + assert!(tags.sha_tag.starts_with("my-app:")); + } + + #[test] + fn env_block_contains_all_three_vars() { + let tags = compute_image_tags("ghcr.io", "my-app"); + let block = render_versioning_env_block(&tags); + assert!(block.contains("IMAGE_TAG:")); + assert!(block.contains("IMAGE_TAG_LATEST:")); + assert!(block.contains("IMAGE_TAG_VERSION:")); + } + + #[test] + fn tag_resolution_step_checks_default_branch() { + let step = render_tag_resolution_step(); + assert!(step.contains("default_branch")); + } + + #[test] + fn tag_resolution_step_checks_semver_tag() { + let step = render_tag_resolution_step(); + assert!(step.contains("refs/tags/v*")); + } + + #[test] + fn tag_resolution_step_outputs_to_github_output() { + let step = render_tag_resolution_step(); + assert!(step.contains("GITHUB_OUTPUT")); + } + + #[test] + fn tags_output_expression_references_step() { + let expr = tags_output_expression(); + assert!(expr.contains("steps.tags.outputs.tags")); + } + + #[test] + fn acr_registry_url_produces_correct_tags() { + let tags = compute_image_tags("myapp.azurecr.io", "api"); + 
assert!(tags.sha_tag.starts_with("myapp.azurecr.io/api:")); + assert!(tags.latest_tag.starts_with("myapp.azurecr.io/api:")); + } + + #[test] + fn gar_registry_url_produces_correct_tags() { + let tags = compute_image_tags("us-docker.pkg.dev/my-project", "api"); + assert!(tags.sha_tag.starts_with("us-docker.pkg.dev/my-project/api:")); + } +} diff --git a/src/generator/cd_generation/writer.rs b/src/generator/cd_generation/writer.rs new file mode 100644 index 00000000..6d3707b7 --- /dev/null +++ b/src/generator/cd_generation/writer.rs @@ -0,0 +1,280 @@ +//! CD File Writer +//! +//! Writes generated CD pipeline files to the correct output paths. +//! Mirrors the CI writer (`ci_generation/writer.rs`) pattern but produces: +//! +//! | Kind | Path | +//! |------------------|-----------------------------------------------| +//! | Azure pipeline | `.github/workflows/deploy-azure.yml` | +//! | GCP pipeline | `.github/workflows/deploy-gcp.yml` | +//! | Hetzner pipeline | `.github/workflows/deploy-hetzner.yml` | +//! | CD manifest | `.syncable/cd-manifest.toml` | +//! +//! The writer validates YAML content before writing and provides a +//! `WriteSummary` for the CLI to display results. + +use std::fs; +use std::path::{Path, PathBuf}; + +use crate::generator::cd_generation::context::CdPlatform; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Classifies the kind of CD file being written. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CdFileKind { + /// Main CD pipeline YAML for a specific platform. + Pipeline(CdPlatform), + /// `.syncable/cd-manifest.toml` + Manifest, +} + +/// A generated CD file ready to be written. +#[derive(Debug, Clone)] +pub struct CdFile { + /// File content (YAML or TOML). + pub content: String, + /// What kind of file this is — drives path resolution. + pub kind: CdFileKind, +} + +impl CdFile { + /// Constructs a pipeline YAML file. 
+ pub fn pipeline(content: String, platform: CdPlatform) -> Self { + Self { + content, + kind: CdFileKind::Pipeline(platform), + } + } + + /// Constructs a manifest file. + pub fn manifest(content: String) -> Self { + Self { + content, + kind: CdFileKind::Manifest, + } + } + + /// Resolves the relative output path for this file. + pub fn relative_path(&self) -> PathBuf { + match &self.kind { + CdFileKind::Pipeline(platform) => { + let filename = match platform { + CdPlatform::Azure => "deploy-azure.yml", + CdPlatform::Gcp => "deploy-gcp.yml", + CdPlatform::Hetzner => "deploy-hetzner.yml", + }; + PathBuf::from(".github/workflows").join(filename) + } + CdFileKind::Manifest => PathBuf::from(".syncable/cd-manifest.toml"), + } + } +} + +/// Result of writing a single file. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum WriteOutcome { + /// File did not exist; was created. + Created, + /// File existed and was overwritten (force mode). + Overwritten, + /// File existed and was left unchanged (no force). + Skipped, +} + +/// Summary of a batch write operation. +#[derive(Debug, Default)] +pub struct WriteSummary { + pub results: Vec<(PathBuf, WriteOutcome)>, +} + +impl WriteSummary { + pub fn created(&self) -> usize { + self.results + .iter() + .filter(|(_, o)| *o == WriteOutcome::Created) + .count() + } + + pub fn overwritten(&self) -> usize { + self.results + .iter() + .filter(|(_, o)| *o == WriteOutcome::Overwritten) + .count() + } + + pub fn skipped(&self) -> usize { + self.results + .iter() + .filter(|(_, o)| *o == WriteOutcome::Skipped) + .count() + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Writes all generated CD files to `output_dir`. +/// +/// When `force` is `true`, existing files are overwritten. +/// When `force` is `false`, existing files are skipped. 
+pub fn write_cd_files( + files: &[CdFile], + output_dir: &Path, + force: bool, +) -> crate::Result { + let mut summary = WriteSummary::default(); + + for file in files { + let rel_path = file.relative_path(); + let full_path = output_dir.join(&rel_path); + + // Create parent directories. + if let Some(parent) = full_path.parent() { + fs::create_dir_all(parent)?; + } + + let outcome = if full_path.exists() { + if force { + fs::write(&full_path, &file.content)?; + WriteOutcome::Overwritten + } else { + WriteOutcome::Skipped + } + } else { + fs::write(&full_path, &file.content)?; + WriteOutcome::Created + }; + + summary.results.push((rel_path, outcome)); + } + + Ok(summary) +} + +/// Prints the dry-run output to stdout. +pub fn print_cd_dry_run(files: &[CdFile]) { + for file in files { + let path = file.relative_path(); + println!("═══ {} ═══", path.display()); + println!("{}", file.content); + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + #[test] + fn azure_pipeline_path() { + let file = CdFile::pipeline("yaml".to_string(), CdPlatform::Azure); + assert_eq!( + file.relative_path(), + PathBuf::from(".github/workflows/deploy-azure.yml") + ); + } + + #[test] + fn gcp_pipeline_path() { + let file = CdFile::pipeline("yaml".to_string(), CdPlatform::Gcp); + assert_eq!( + file.relative_path(), + PathBuf::from(".github/workflows/deploy-gcp.yml") + ); + } + + #[test] + fn hetzner_pipeline_path() { + let file = CdFile::pipeline("yaml".to_string(), CdPlatform::Hetzner); + assert_eq!( + file.relative_path(), + PathBuf::from(".github/workflows/deploy-hetzner.yml") + ); + } + + #[test] + fn manifest_path() { + let file = CdFile::manifest("toml".to_string()); + assert_eq!( + file.relative_path(), + PathBuf::from(".syncable/cd-manifest.toml") + ); + } + + #[test] + fn write_creates_files() { + let dir = tempdir().unwrap(); + let files = vec![ + CdFile::pipeline("name: 
test".to_string(), CdPlatform::Azure), + CdFile::manifest("[resolved]".to_string()), + ]; + + let summary = write_cd_files(&files, dir.path(), false).unwrap(); + assert_eq!(summary.created(), 2); + assert_eq!(summary.skipped(), 0); + + // Verify files exist. + assert!(dir + .path() + .join(".github/workflows/deploy-azure.yml") + .exists()); + assert!(dir.path().join(".syncable/cd-manifest.toml").exists()); + } + + #[test] + fn write_skips_existing_without_force() { + let dir = tempdir().unwrap(); + let files = vec![CdFile::pipeline("v1".to_string(), CdPlatform::Azure)]; + + // First write. + write_cd_files(&files, dir.path(), false).unwrap(); + + // Second write — should skip. + let files2 = vec![CdFile::pipeline("v2".to_string(), CdPlatform::Azure)]; + let summary = write_cd_files(&files2, dir.path(), false).unwrap(); + assert_eq!(summary.skipped(), 1); + + // Content should still be v1. + let content = fs::read_to_string( + dir.path().join(".github/workflows/deploy-azure.yml"), + ) + .unwrap(); + assert_eq!(content, "v1"); + } + + #[test] + fn write_overwrites_existing_with_force() { + let dir = tempdir().unwrap(); + let files = vec![CdFile::pipeline("v1".to_string(), CdPlatform::Azure)]; + write_cd_files(&files, dir.path(), false).unwrap(); + + let files2 = vec![CdFile::pipeline("v2".to_string(), CdPlatform::Azure)]; + let summary = write_cd_files(&files2, dir.path(), true).unwrap(); + assert_eq!(summary.overwritten(), 1); + + let content = fs::read_to_string( + dir.path().join(".github/workflows/deploy-azure.yml"), + ) + .unwrap(); + assert_eq!(content, "v2"); + } + + #[test] + fn summary_counts_correct() { + let dir = tempdir().unwrap(); + // Create one file first. + let pre = vec![CdFile::pipeline("old".to_string(), CdPlatform::Azure)]; + write_cd_files(&pre, dir.path(), false).unwrap(); + + // Now write two: one existing (skip), one new (create). 
+ let files = vec![ + CdFile::pipeline("new".to_string(), CdPlatform::Azure), + CdFile::manifest("toml".to_string()), + ]; + let summary = write_cd_files(&files, dir.path(), false).unwrap(); + assert_eq!(summary.created(), 1); + assert_eq!(summary.skipped(), 1); + } +} diff --git a/src/generator/ci_generation/build_step.rs b/src/generator/ci_generation/build_step.rs new file mode 100644 index 00000000..6389024a --- /dev/null +++ b/src/generator/ci_generation/build_step.rs @@ -0,0 +1,197 @@ +//! Build Step Generator — CI-07 +//! +//! Determines the build command and artifact output path for the project. +//! Returns `None` when no build step can be inferred (e.g. a library-only +//! project with no binary output). +//! +//! Resolution order: +//! 1. Explicitly detected `build_command` from project scripts (e.g. package.json `build`) +//! 2. Deterministic command inferred from `package_manager` / `primary_language` +//! 3. `{{BUILD_COMMAND}}` placeholder when nothing can be inferred + +use crate::generator::ci_generation::{ + context::{CiContext, PackageManager}, + schema::BuildStep, +}; + +/// Generates the build step, or `None` if the project produces no build artifact. +pub fn generate_build_step(ctx: &CiContext) -> Option { + // JS/TS projects: use the detected build script if present; fallback per package manager. 
+ if matches!( + ctx.primary_language.to_lowercase().as_str(), + "javascript" | "typescript" | "js" | "ts" + ) { + return Some(js_build_step(ctx)); + } + + let (command, artifact_path) = match ctx.primary_language.to_lowercase().as_str() { + "rust" => ( + "cargo build --release".to_string(), + Some("target/release/".to_string()), + ), + "go" | "golang" => ( + "go build -o ./bin/app ./...".to_string(), + Some("bin/".to_string()), + ), + "python" => ( + "python -m build".to_string(), + Some("dist/".to_string()), + ), + "java" | "kotlin" => match &ctx.package_manager { + PackageManager::Gradle => ( + "./gradlew assemble".to_string(), + Some("build/libs/".to_string()), + ), + _ => ( + "mvn package -DskipTests".to_string(), + Some("target/".to_string()), + ), + }, + _ => { + // Fall back to an explicitly detected build command if we have one. + let cmd = ctx.build_command.clone().unwrap_or_else(|| "{{BUILD_COMMAND}}".to_string()); + return Some(BuildStep { command: cmd, artifact_path: None }); + } + }; + + Some(BuildStep { command, artifact_path }) +} + +/// Builds the step for JavaScript/TypeScript projects. +fn js_build_step(ctx: &CiContext) -> BuildStep { + // Prefer the build script surfaced from package.json scripts. + if let Some(cmd) = &ctx.build_command { + return BuildStep { + command: cmd.clone(), + artifact_path: Some("dist/".to_string()), + }; + } + + // Derive ` run build` from the detected package manager. 
+ let command = match &ctx.package_manager { + PackageManager::Yarn => "yarn build", + PackageManager::Pnpm => "pnpm run build", + PackageManager::Bun => "bun run build", + _ => "npm run build", + }; + + BuildStep { + command: command.to_string(), + artifact_path: Some("dist/".to_string()), + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::{context::CiContext, test_helpers::make_base_ctx}; + use tempfile::TempDir; + + fn ctx(language: &str, pm: PackageManager, build_cmd: Option<&str>) -> (CiContext, TempDir) { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { + primary_language: language.to_string(), + package_manager: pm, + build_command: build_cmd.map(|s| s.to_string()), + ..make_base_ctx(dir.path(), language) + }; + (ctx, dir) + } + + // ── Rust ────────────────────────────────────────────────────────────────── + + #[test] + fn test_rust_release_build() { + let (c, _d) = ctx("rust", PackageManager::Cargo, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "cargo build --release"); + assert_eq!(step.artifact_path.as_deref(), Some("target/release/")); + } + + // ── Go ──────────────────────────────────────────────────────────────────── + + #[test] + fn test_go_build() { + let (c, _d) = ctx("go", PackageManager::GoMod, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "go build -o ./bin/app ./..."); + assert_eq!(step.artifact_path.as_deref(), Some("bin/")); + } + + // ── Python ──────────────────────────────────────────────────────────────── + + #[test] + fn test_python_wheel_build() { + let (c, _d) = ctx("python", PackageManager::Poetry, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "python -m build"); + assert_eq!(step.artifact_path.as_deref(), Some("dist/")); + } + + // 
── Java ────────────────────────────────────────────────────────────────── + + #[test] + fn test_java_maven_package() { + let (c, _d) = ctx("java", PackageManager::Maven, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "mvn package -DskipTests"); + assert_eq!(step.artifact_path.as_deref(), Some("target/")); + } + + #[test] + fn test_java_gradle_assemble() { + let (c, _d) = ctx("java", PackageManager::Gradle, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "./gradlew assemble"); + assert_eq!(step.artifact_path.as_deref(), Some("build/libs/")); + } + + // ── JavaScript / TypeScript ─────────────────────────────────────────────── + + #[test] + fn test_js_uses_detected_build_script() { + let (c, _d) = ctx("javascript", PackageManager::Npm, Some("vite build")); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "vite build"); + } + + #[test] + fn test_js_npm_fallback() { + let (c, _d) = ctx("javascript", PackageManager::Npm, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "npm run build"); + assert_eq!(step.artifact_path.as_deref(), Some("dist/")); + } + + #[test] + fn test_js_yarn_fallback() { + let (c, _d) = ctx("javascript", PackageManager::Yarn, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "yarn build"); + } + + #[test] + fn test_ts_pnpm_fallback() { + let (c, _d) = ctx("typescript", PackageManager::Pnpm, None); + let step = generate_build_step(&c).expect("should produce step"); + assert_eq!(step.command, "pnpm run build"); + } + + // ── Unknown language fallback ───────────────────────────────────────────── + + #[test] + fn test_unknown_language_with_build_command() { + let (c, _d) = ctx("elixir", PackageManager::Unknown, Some("mix compile")); + let step = generate_build_step(&c).expect("should produce 
step"); + assert_eq!(step.command, "mix compile"); + } + + #[test] + fn test_unknown_language_no_build_command_yields_placeholder() { + let (c, _d) = ctx("elixir", PackageManager::Unknown, None); + let step = generate_build_step(&c).expect("should produce step"); + assert!(step.command.contains("{{BUILD_COMMAND}}")); + } +} diff --git a/src/generator/ci_generation/cache.rs b/src/generator/ci_generation/cache.rs new file mode 100644 index 00000000..25a456cb --- /dev/null +++ b/src/generator/ci_generation/cache.rs @@ -0,0 +1,204 @@ +//! CI-04 — Dependency cache strategy resolver. +//! +//! Maps a `CiContext`'s package manager and lock file to the GitHub Actions +//! `actions/cache` step configuration. Returns `None` when no lock file is +//! present so the caller can omit the step entirely. + +use serde::Serialize; + +use crate::generator::ci_generation::context::{CiContext, PackageManager}; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Cache step configuration for `actions/cache`. +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct CacheConfig { + /// Directories the runner should persist between jobs. + pub paths: Vec, + /// Primary cache key — busted when the lock file changes. + pub key: String, + /// Fallback prefix used when no exact key match exists. + pub restore_keys: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Returns a `CacheConfig` for the detected package manager, or `None` when +/// no lock file was found (caller should omit the cache step entirely). +pub fn resolve_cache(ctx: &CiContext) -> Option { + // Without a verified lock file on disk the cache key expression is + // meaningless — skip the step rather than emit a broken config. 
+ ctx.lock_file.as_ref()?; + + Some(match ctx.package_manager { + PackageManager::Npm => CacheConfig { + paths: vec!["~/.npm".into()], + key: "npm-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }}".into(), + restore_keys: vec!["npm-${{ runner.os }}-".into()], + }, + PackageManager::Yarn => CacheConfig { + paths: vec![".yarn/cache".into(), ".yarn/unplugged".into()], + key: "yarn-${{ runner.os }}-${{ hashFiles('**/yarn.lock') }}".into(), + restore_keys: vec!["yarn-${{ runner.os }}-".into()], + }, + PackageManager::Pnpm => CacheConfig { + paths: vec!["~/.pnpm-store".into()], + key: "pnpm-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}".into(), + restore_keys: vec!["pnpm-${{ runner.os }}-".into()], + }, + PackageManager::Bun => CacheConfig { + paths: vec!["~/.bun/install/cache".into()], + key: "bun-${{ runner.os }}-${{ hashFiles('**/bun.lock*') }}".into(), + restore_keys: vec!["bun-${{ runner.os }}-".into()], + }, + PackageManager::Pip => CacheConfig { + paths: vec!["~/.cache/pip".into()], + key: "pip-${{ runner.os }}-${{ hashFiles('**/requirements*.txt') }}".into(), + restore_keys: vec!["pip-${{ runner.os }}-".into()], + }, + PackageManager::Uv => CacheConfig { + paths: vec!["~/.cache/uv".into()], + key: "uv-${{ runner.os }}-${{ hashFiles('**/uv.lock') }}".into(), + restore_keys: vec!["uv-${{ runner.os }}-".into()], + }, + PackageManager::Poetry => CacheConfig { + paths: vec!["~/.cache/pypoetry".into()], + key: "poetry-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }}".into(), + restore_keys: vec!["poetry-${{ runner.os }}-".into()], + }, + PackageManager::Cargo => CacheConfig { + paths: vec![ + "~/.cargo/registry/index".into(), + "~/.cargo/registry/cache".into(), + "~/.cargo/git/db".into(), + "target/".into(), + ], + key: "cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}".into(), + restore_keys: vec!["cargo-${{ runner.os }}-".into()], + }, + PackageManager::GoMod => CacheConfig { + paths: vec!["~/go/pkg/mod".into(), 
"~/.cache/go-build".into()], + key: "go-${{ runner.os }}-${{ hashFiles('**/go.sum') }}".into(), + restore_keys: vec!["go-${{ runner.os }}-".into()], + }, + PackageManager::Maven => CacheConfig { + paths: vec!["~/.m2/repository".into()], + key: "maven-${{ runner.os }}-${{ hashFiles('**/pom.xml') }}".into(), + restore_keys: vec!["maven-${{ runner.os }}-".into()], + }, + PackageManager::Gradle => CacheConfig { + paths: vec!["~/.gradle/caches".into(), "~/.gradle/wrapper".into()], + key: "gradle-${{ runner.os }}-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }}".into(), + restore_keys: vec!["gradle-${{ runner.os }}-".into()], + }, + PackageManager::Unknown => return None, + }) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::context::{Linter, PackageManager, TestFramework}; + use crate::generator::ci_generation::test_helpers::make_base_ctx; + use std::path::PathBuf; + use tempfile::TempDir; + + fn make_ctx(pm: PackageManager, lock_file: Option, root: &std::path::Path) -> CiContext { + CiContext { package_manager: pm, lock_file, ..make_base_ctx(root, "") } + } + + fn ctx_with_lock(pm: PackageManager, lock_name: &str) -> (CiContext, TempDir) { + let dir = TempDir::new().unwrap(); + let lock_path = dir.path().join(lock_name); + std::fs::write(&lock_path, "").unwrap(); + let ctx = make_ctx(pm, Some(lock_path), dir.path()); + (ctx, dir) + } + + // ── Happy-path per package manager ──────────────────────────────────────── + + #[test] + fn npm_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Npm, "package-lock.json"); + let cfg = resolve_cache(&ctx).unwrap(); + assert_eq!(cfg.paths, vec!["~/.npm"]); + assert!(cfg.key.contains("package-lock.json")); + assert_eq!(cfg.restore_keys, vec!["npm-${{ runner.os }}-"]); + } + + #[test] + fn yarn_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Yarn, "yarn.lock"); + let cfg = 
resolve_cache(&ctx).unwrap(); + assert!(cfg.paths.contains(&".yarn/cache".to_string())); + assert!(cfg.key.contains("yarn.lock")); + } + + #[test] + fn pnpm_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Pnpm, "pnpm-lock.yaml"); + let cfg = resolve_cache(&ctx).unwrap(); + assert_eq!(cfg.paths, vec!["~/.pnpm-store"]); + assert!(cfg.key.contains("pnpm-lock.yaml")); + } + + #[test] + fn cargo_cache_has_target_dir() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Cargo, "Cargo.lock"); + let cfg = resolve_cache(&ctx).unwrap(); + assert!(cfg.paths.contains(&"target/".to_string())); + assert!(cfg.key.contains("Cargo.lock")); + } + + #[test] + fn go_cache_has_build_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::GoMod, "go.sum"); + let cfg = resolve_cache(&ctx).unwrap(); + assert!(cfg.paths.contains(&"~/.cache/go-build".to_string())); + assert!(cfg.key.contains("go.sum")); + } + + #[test] + fn poetry_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Poetry, "poetry.lock"); + let cfg = resolve_cache(&ctx).unwrap(); + assert_eq!(cfg.paths, vec!["~/.cache/pypoetry"]); + assert!(cfg.key.contains("poetry.lock")); + } + + #[test] + fn maven_cache() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Maven, "pom.xml"); + let cfg = resolve_cache(&ctx).unwrap(); + assert_eq!(cfg.paths, vec!["~/.m2/repository"]); + assert!(cfg.key.contains("pom.xml")); + } + + #[test] + fn gradle_cache_includes_wrapper() { + let (ctx, _dir) = ctx_with_lock(PackageManager::Gradle, "build.gradle"); + let cfg = resolve_cache(&ctx).unwrap(); + assert!(cfg.paths.contains(&"~/.gradle/wrapper".to_string())); + } + + // ── Skip-cache cases ────────────────────────────────────────────────────── + + #[test] + fn no_lock_file_returns_none() { + let dir = TempDir::new().unwrap(); + let ctx = make_ctx(PackageManager::Npm, None, dir.path()); + assert!(resolve_cache(&ctx).is_none()); + } + + #[test] + fn unknown_pm_returns_none() { + let dir = TempDir::new().unwrap(); + // 
Even with a lock_file path set, Unknown PM should return None + let lock = dir.path().join("some.lock"); + std::fs::write(&lock, "").unwrap(); + let ctx = make_ctx(PackageManager::Unknown, Some(lock), dir.path()); + assert!(resolve_cache(&ctx).is_none()); + } +} diff --git a/src/generator/ci_generation/ci_config.rs b/src/generator/ci_generation/ci_config.rs new file mode 100644 index 00000000..f6d552da --- /dev/null +++ b/src/generator/ci_generation/ci_config.rs @@ -0,0 +1,345 @@ +//! CI-22 — `.syncable.ci.toml` Project-Level Config +//! +//! Parses the optional `[ci]` block from `.syncable.toml` (or a standalone +//! `.syncable.ci.toml`). Every field carries `#[serde(default)]` so partial +//! configs are always valid — only the keys present in the file are applied. +//! +//! Priority order (lowest → highest): +//! detected value < config file < CLI flags +//! +//! `merge_config_into_context()` applies the config-file layer; CLI flags are +//! handled in `handle_generate_ci()` after this call. + +use std::path::Path; + +use serde::Deserialize; + +use crate::cli::{CiFormat, CiPlatform}; +use crate::generator::ci_generation::context::CiContext; + +// ── Config struct ───────────────────────────────────────────────────────────── + +/// Represents the `[ci]` section of `.syncable.toml` / `.syncable.ci.toml`. +/// +/// All fields are `Option` so that absent keys are distinguishable from +/// explicit `""` values, and `Default` gives every field `None` which the +/// merge function treats as "not set — keep the detected value". +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(default)] +pub struct CiConfig { + /// Override the detected platform. + pub platform: Option, + /// Override the effective CI format. + pub format: Option, + /// Override the detected default branch. + pub default_branch: Option, + /// Additional branches appended to push/PR triggers. + pub extra_branches: Option>, + /// Override the detected test invocation command. 
+ pub test_command: Option, + /// Override the detected build command. + pub build_command: Option, + /// Step names to omit from the generated pipeline (e.g. `["lint"]`). + pub skip_steps: Option>, + /// Custom prefix for secrets/env variable names (e.g. `"MYAPP"`). + pub env_prefix: Option, +} + +/// Wraps `CiConfig` when parsing from a full `.syncable.toml` that uses a +/// `[ci]` table header. +#[derive(Debug, Deserialize, Default)] +#[serde(default)] +struct SyncableToml { + ci: CiConfig, +} + +// ── File discovery ──────────────────────────────────────────────────────────── + +/// Attempts to load CI config from the project root. +/// +/// Look-up order: +/// 1. `.syncable.ci.toml` — dedicated file, takes precedence +/// 2. `.syncable.toml` — shared config, reads the `[ci]` table +/// +/// Returns `None` when neither file exists (not an error — just unconfigured). +pub fn load_ci_config(project_root: &Path) -> crate::Result> { + // 1. Dedicated file + let dedicated = project_root.join(".syncable.ci.toml"); + if dedicated.exists() { + let raw = std::fs::read_to_string(&dedicated)?; + let cfg: CiConfig = toml::from_str(&raw) + .map_err(|e| crate::error::IaCGeneratorError::Config( + crate::error::ConfigError::ParsingFailed(e.to_string()) + ))?; + return Ok(Some(cfg)); + } + + // 2. 
Shared file with [ci] table + let shared = project_root.join(".syncable.toml"); + if shared.exists() { + let raw = std::fs::read_to_string(&shared)?; + let wrapper: SyncableToml = toml::from_str(&raw) + .map_err(|e| crate::error::IaCGeneratorError::Config( + crate::error::ConfigError::ParsingFailed(e.to_string()) + ))?; + // Only return Some when at least one field was explicitly set + let cfg = wrapper.ci; + if cfg.platform.is_some() + || cfg.format.is_some() + || cfg.default_branch.is_some() + || cfg.extra_branches.is_some() + || cfg.test_command.is_some() + || cfg.build_command.is_some() + || cfg.skip_steps.is_some() + || cfg.env_prefix.is_some() + { + return Ok(Some(cfg)); + } + } + + Ok(None) +} + +// ── Merge ───────────────────────────────────────────────────────────────────── + +/// Applies `config` onto `ctx`, overwriting only the fields the config file +/// explicitly set. CLI flags are applied *after* this call and will win over +/// both detected values and config-file values. +pub fn merge_config_into_context(config: &CiConfig, ctx: &mut CiContext) { + if let Some(branch) = &config.default_branch { + ctx.default_branch = branch.clone(); + } + + if let Some(cmd) = &config.test_command { + // The test command lives inside the nested TestStep once the pipeline + // is built, but CiContext doesn't own that struct yet — store it in a + // dedicated field so the pipeline builder can pick it up. 
+ ctx.config_test_command = Some(cmd.clone()); + } + + if let Some(cmd) = &config.build_command { + ctx.build_command = Some(cmd.clone()); + } + + if let Some(prefix) = &config.env_prefix { + ctx.env_prefix = Some(prefix.clone()); + } + + if let Some(skip) = &config.skip_steps { + ctx.skip_steps = skip.clone(); + } + + if let Some(extra) = &config.extra_branches { + ctx.extra_branches = extra.clone(); + } + + // platform / format overrides: convert string → enum, ignore unknown values + if let Some(p) = &config.platform { + if let Ok(platform) = parse_platform(p) { + ctx.platform = platform; + } + } + + if let Some(f) = &config.format { + if let Ok(format) = parse_format(f) { + ctx.format = format; + } + } +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +fn parse_platform(s: &str) -> Result { + match s.to_lowercase().as_str() { + "azure" => Ok(CiPlatform::Azure), + "gcp" => Ok(CiPlatform::Gcp), + "hetzner" => Ok(CiPlatform::Hetzner), + _ => Err(()), + } +} + +fn parse_format(s: &str) -> Result { + match s.to_lowercase().replace('-', "_").as_str() { + "github_actions" | "githubactions" => Ok(CiFormat::GithubActions), + "azure_pipelines" | "azurepipelines" => Ok(CiFormat::AzurePipelines), + "cloud_build" | "cloudbuild" => Ok(CiFormat::CloudBuild), + _ => Err(()), + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + fn parse_config(toml_str: &str) -> CiConfig { + toml::from_str(toml_str).expect("should parse") + } + + #[test] + fn test_empty_toml_parses_to_all_none() { + let cfg = parse_config(""); + assert!(cfg.platform.is_none()); + assert!(cfg.default_branch.is_none()); + assert!(cfg.test_command.is_none()); + assert!(cfg.build_command.is_none()); + assert!(cfg.skip_steps.is_none()); + assert!(cfg.env_prefix.is_none()); + assert!(cfg.extra_branches.is_none()); + assert!(cfg.format.is_none()); + } + + #[test] + fn 
test_partial_toml_parses() { + let cfg = parse_config(r#" + platform = "gcp" + default_branch = "main" + "#); + assert_eq!(cfg.platform.as_deref(), Some("gcp")); + assert_eq!(cfg.default_branch.as_deref(), Some("main")); + assert!(cfg.test_command.is_none()); + } + + #[test] + fn test_full_toml_parses() { + let cfg = parse_config(r#" + platform = "azure" + format = "azure-pipelines" + default_branch = "main" + extra_branches = ["develop", "release/*"] + test_command = "npm run test:ci" + build_command = "npm run build" + skip_steps = ["lint"] + env_prefix = "MYAPP" + "#); + assert_eq!(cfg.platform.as_deref(), Some("azure")); + assert_eq!(cfg.format.as_deref(), Some("azure-pipelines")); + assert_eq!(cfg.default_branch.as_deref(), Some("main")); + let expected_branches: Vec = vec!["develop".to_string(), "release/*".to_string()]; + assert_eq!(cfg.extra_branches.as_deref(), Some(expected_branches.as_slice())); + assert_eq!(cfg.test_command.as_deref(), Some("npm run test:ci")); + assert_eq!(cfg.build_command.as_deref(), Some("npm run build")); + let expected_skip: Vec = vec!["lint".to_string()]; + assert_eq!(cfg.skip_steps.as_deref(), Some(expected_skip.as_slice())); + assert_eq!(cfg.env_prefix.as_deref(), Some("MYAPP")); + } + + #[test] + fn test_syncable_toml_wrapper_parses() { + let raw = r#" + [ci] + platform = "gcp" + test_command = "pytest" + "#; + let wrapper: SyncableToml = toml::from_str(raw).expect("should parse"); + assert_eq!(wrapper.ci.platform.as_deref(), Some("gcp")); + assert_eq!(wrapper.ci.test_command.as_deref(), Some("pytest")); + } + + #[test] + fn test_syncable_toml_no_ci_section_gives_empty() { + let raw = r#" + [other_section] + key = "value" + "#; + let wrapper: SyncableToml = toml::from_str(raw).expect("should parse"); + assert!(wrapper.ci.platform.is_none()); + } + + // ── merge tests ──────────────────────────────────────────────────────── + + fn make_context() -> CiContext { + use 
crate::generator::ci_generation::test_helpers::make_minimal_context; + make_minimal_context() + } + + #[test] + fn test_merge_default_branch() { + let cfg = parse_config(r#"default_branch = "develop""#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.default_branch, "develop"); + } + + #[test] + fn test_merge_does_not_overwrite_when_field_absent() { + let cfg = parse_config(""); + let mut ctx = make_context(); + let original_branch = ctx.default_branch.clone(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.default_branch, original_branch); + } + + #[test] + fn test_merge_build_command() { + let cfg = parse_config(r#"build_command = "cargo build --release""#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.build_command.as_deref(), Some("cargo build --release")); + } + + #[test] + fn test_merge_test_command_stored_in_config_field() { + let cfg = parse_config(r#"test_command = "npx jest --ci""#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.config_test_command.as_deref(), Some("npx jest --ci")); + } + + #[test] + fn test_merge_skip_steps() { + let cfg = parse_config(r#"skip_steps = ["lint", "build"]"#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.skip_steps, vec!["lint", "build"]); + } + + #[test] + fn test_merge_platform_string_to_enum() { + let cfg = parse_config(r#"platform = "gcp""#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert!(matches!(ctx.platform, CiPlatform::Gcp)); + } + + #[test] + fn test_merge_unknown_platform_ignored() { + let cfg = parse_config(r#"platform = "unknown-cloud""#); + let mut ctx = make_context(); + let original_platform = ctx.platform.clone(); + merge_config_into_context(&cfg, &mut ctx); + // platform unchanged because we can't parse it + assert_eq!( + std::mem::discriminant(&ctx.platform), + 
std::mem::discriminant(&original_platform) + ); + } + + #[test] + fn test_merge_format_normalises_hyphens() { + let cfg = parse_config(r#"format = "github-actions""#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert!(matches!(ctx.format, CiFormat::GithubActions)); + } + + #[test] + fn test_merge_extra_branches() { + let cfg = parse_config(r#"extra_branches = ["develop"]"#); + let mut ctx = make_context(); + merge_config_into_context(&cfg, &mut ctx); + assert_eq!(ctx.extra_branches, vec!["develop"]); + } + + #[test] + fn test_deserialize_env_prefix_and_platform() { + let raw = r#" + platform = "hetzner" + env_prefix = "APP" + "#; + let cfg: CiConfig = toml::from_str(raw).unwrap(); + assert_eq!(cfg.platform.as_deref(), Some("hetzner")); + assert_eq!(cfg.env_prefix.as_deref(), Some("APP")); + } +} diff --git a/src/generator/ci_generation/context.rs b/src/generator/ci_generation/context.rs new file mode 100644 index 00000000..f4f14c7c --- /dev/null +++ b/src/generator/ci_generation/context.rs @@ -0,0 +1,470 @@ +//! CI-02 — `CiContext` and `collect_ci_context` entry point. + +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use std::fmt; + +use serde::Serialize; + +use crate::analyzer::{analyze_monorepo, analyze_project, ProjectAnalysis, TechnologyCategory}; +use crate::cli::{CiFormat, CiPlatform}; + +// ── Domain enums ───────────────────────────────────────────────────────────── + +/// Package manager detected for the primary language. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum PackageManager { + Npm, + Yarn, + Pnpm, + Bun, + Pip, + Poetry, + Uv, + Cargo, + GoMod, + Maven, + Gradle, + Unknown, +} + +impl From<&str> for PackageManager { + fn from(s: &str) -> Self { + match s.to_lowercase().as_str() { + "npm" => Self::Npm, + "yarn" => Self::Yarn, + "pnpm" => Self::Pnpm, + "bun" => Self::Bun, + "pip" => Self::Pip, + "poetry" => Self::Poetry, + "uv" => Self::Uv, + "cargo" => Self::Cargo, + "go mod" | "gomod" | "go" => Self::GoMod, + "maven" | "mvn" => Self::Maven, + "gradle" => Self::Gradle, + _ => Self::Unknown, + } + } +} + +impl fmt::Display for PackageManager { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Npm => "npm", + Self::Yarn => "yarn", + Self::Pnpm => "pnpm", + Self::Bun => "bun", + Self::Pip => "pip", + Self::Poetry => "poetry", + Self::Uv => "uv", + Self::Cargo => "cargo", + Self::GoMod => "go mod", + Self::Maven => "maven", + Self::Gradle => "gradle", + Self::Unknown => "unknown", + }; + write!(f, "{}", s) + } +} + +/// Test framework detected in the project. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum TestFramework { + Jest, + Vitest, + Mocha, + Pytest, + CargoTest, + GoTest, + JunitMaven, + JunitGradle, + Unknown, +} + +impl From<&str> for TestFramework { + fn from(s: &str) -> Self { + match s.to_lowercase().as_str() { + "jest" => Self::Jest, + "vitest" => Self::Vitest, + "mocha" => Self::Mocha, + "pytest" => Self::Pytest, + "cargo test" | "cargo-test" | "cargotest" => Self::CargoTest, + "go test" | "gotest" => Self::GoTest, + "junit" | "junit-maven" | "junit (maven)" => Self::JunitMaven, + "junit-gradle" | "junit (gradle)" => Self::JunitGradle, + _ => Self::Unknown, + } + } +} + +impl fmt::Display for TestFramework { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Jest => "jest", + Self::Vitest => "vitest", + Self::Mocha => "mocha", + Self::Pytest => "pytest", + Self::CargoTest => "cargo test", + Self::GoTest => "go test", + Self::JunitMaven => "junit (maven)", + Self::JunitGradle => "junit (gradle)", + Self::Unknown => "unknown", + }; + write!(f, "{}", s) + } +} + +/// Linter or formatter detected in the project. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub enum Linter { + Eslint, + Prettier, + Pylint, + Ruff, + Clippy, + GolangciLint, + Checkstyle, + Ktlint, + None, +} + +impl From<&str> for Linter { + fn from(s: &str) -> Self { + match s.to_lowercase().as_str() { + "eslint" => Self::Eslint, + "prettier" => Self::Prettier, + "pylint" => Self::Pylint, + "ruff" => Self::Ruff, + "clippy" | "cargo clippy" => Self::Clippy, + "golangci-lint" | "golangci_lint" | "golangci lint" => Self::GolangciLint, + "checkstyle" => Self::Checkstyle, + "ktlint" => Self::Ktlint, + _ => Self::None, + } + } +} + +impl fmt::Display for Linter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Eslint => "eslint", + Self::Prettier => "prettier", + Self::Pylint => "pylint", + Self::Ruff => "ruff", + Self::Clippy => "clippy", + Self::GolangciLint => "golangci-lint", + Self::Checkstyle => "checkstyle", + Self::Ktlint => "ktlint", + Self::None => "", + }; + write!(f, "{}", s) + } +} + +// ── Primary struct ──────────────────────────────────────────────────────────── + +/// Enriched snapshot of a project consumed by all CI generators. +#[derive(Debug, Clone, Serialize)] +pub struct CiContext { + /// Raw analyzer output; available to generators that need fields beyond what CiContext promotes. + pub analysis: ProjectAnalysis, + pub primary_language: String, + /// language name → version string + pub runtime_versions: HashMap, + pub package_manager: PackageManager, + /// Absolute path to the detected lock file, if present. + pub lock_file: Option, + pub test_framework: Option, + pub linter: Option, + /// Command from the default `BuildScript`, if any. + pub build_command: Option, + pub has_dockerfile: bool, + pub monorepo: bool, + /// Sub-package directory names; empty for single-project repos. 
+ pub monorepo_packages: Vec, + pub default_branch: String, + pub platform: CiPlatform, + pub format: CiFormat, + pub project_name: String, + /// Test command override from `.syncable.ci.toml` (CI-22). + pub config_test_command: Option, + /// Env/secret variable name prefix override from config or CLI. + pub env_prefix: Option, + /// Step names to skip (from config file). + pub skip_steps: Vec, + /// Extra push/PR branches from config file. + pub extra_branches: Vec, +} + +// ── Helper functions ────────────────────────────────────────────────────────── + +/// Returns the upstream default branch via `git symbolic-ref`; falls back to `"main"`. +fn detect_default_branch(path: &Path) -> String { + let output = Command::new("git") + .args(["symbolic-ref", "refs/remotes/origin/HEAD"]) + .current_dir(path) + .output(); + + match output { + Ok(out) if out.status.success() => { + let raw = String::from_utf8_lossy(&out.stdout); + raw.trim() + .rsplit('/') + .next() + .unwrap_or("main") + .to_string() + } + _ => "main".to_string(), + } +} + +/// Returns the first matching lock file path for the given package manager. +fn detect_lock_file(project_root: &Path, pm: &PackageManager) -> Option { + let candidates: &[&str] = match pm { + PackageManager::Npm => &["package-lock.json"], + PackageManager::Yarn => &["yarn.lock"], + PackageManager::Pnpm => &["pnpm-lock.yaml"], + PackageManager::Bun => &["bun.lockb", "bun.lock"], + PackageManager::Pip => &["requirements.txt", "requirements-lock.txt"], + PackageManager::Poetry => &["poetry.lock"], + PackageManager::Uv => &["uv.lock"], + PackageManager::Cargo => &["Cargo.lock"], + PackageManager::GoMod => &["go.sum"], + PackageManager::Maven => &[], + PackageManager::Gradle => &[], + PackageManager::Unknown => &[], + }; + + candidates.iter().find_map(|name| { + let p = project_root.join(name); + p.exists().then_some(p) + }) +} + +/// Returns the project root's directory name as the project identifier. 
+fn detect_project_name(analysis: &ProjectAnalysis) -> String { + analysis + .project_root + .file_name() + .map(|n| n.to_string_lossy().into_owned()) + .unwrap_or_else(|| "project".to_string()) +} + +/// Checks for a canonical manifest file directly at `project_root` and returns +/// the language name that should take priority over confidence-score ranking. +/// +/// Manifests are tested in priority order so compiled/backend languages always +/// win over a companion `package.json` that lives in a sub-directory but gets +/// scanned by the project analyzer. +fn detect_root_manifest_language(project_root: &Path) -> Option<&'static str> { + const MANIFESTS: &[(&str, &str)] = &[ + ("Cargo.toml", "Rust"), + ("go.mod", "Go"), + ("pyproject.toml", "Python"), + ("setup.py", "Python"), + ("requirements.txt", "Python"), + ("pom.xml", "Java"), + ("build.gradle", "Java"), + ("build.gradle.kts", "Kotlin"), + ("package.json", "TypeScript"), + ]; + MANIFESTS.iter().find_map(|(file, lang)| project_root.join(file).exists().then_some(*lang)) +} + +/// Returns `true` when `tf` is a reasonable test framework for `language`. +/// Used to discard cross-language detections (e.g. Vitest when primary is Rust). +fn test_framework_matches_language(language: &str, tf: &TestFramework) -> bool { + match language.to_lowercase().as_str() { + "typescript" | "javascript" => { + matches!(tf, TestFramework::Jest | TestFramework::Vitest | TestFramework::Mocha) + } + "python" => matches!(tf, TestFramework::Pytest), + "rust" => matches!(tf, TestFramework::CargoTest), + "go" => matches!(tf, TestFramework::GoTest), + "java" | "kotlin" => { + matches!(tf, TestFramework::JunitMaven | TestFramework::JunitGradle) + } + _ => true, + } +} + +/// Returns `true` when `linter` is appropriate for `language`. 
+fn linter_matches_language(language: &str, linter: &Linter) -> bool { + match language.to_lowercase().as_str() { + "typescript" | "javascript" => matches!(linter, Linter::Eslint | Linter::Prettier), + "python" => matches!(linter, Linter::Pylint | Linter::Ruff), + "rust" => matches!(linter, Linter::Clippy), + "go" => matches!(linter, Linter::GolangciLint), + "java" => matches!(linter, Linter::Checkstyle), + "kotlin" => matches!(linter, Linter::Ktlint), + _ => true, + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Runs the project analyzer and assembles a `CiContext` for the given path. +pub fn collect_ci_context( + path: &Path, + platform: CiPlatform, + format: CiFormat, +) -> crate::Result { + let analysis = analyze_project(path)?; + + // ── Primary language ────────────────────────────────────────────────── + // Prefer the language whose manifest lives directly at the project root so + // a companion sub-project (e.g. a TypeScript IDE extension in a sub-dir) + // cannot outrank the primary manifest by raw file-count confidence alone. + let primary_language = detect_root_manifest_language(&analysis.project_root) + .map(|s| s.to_string()) + .or_else(|| { + analysis + .languages + .iter() + .max_by(|a, b| { + a.confidence + .partial_cmp(&b.confidence) + .unwrap_or(std::cmp::Ordering::Equal) + }) + .map(|l| l.name.clone()) + }) + .unwrap_or_else(|| "unknown".to_string()); + + // ── Runtime versions ────────────────────────────────────────────────── + let runtime_versions: HashMap = analysis + .languages + .iter() + .filter_map(|l| l.version.as_ref().map(|v| (l.name.clone(), v.clone()))) + .collect(); + + // ── Package manager ─────────────────────────────────────────────────── + // Look up the package manager from the root language's DetectedLanguage + // entry so the sub-project's manager does not override the primary one. 
+ let package_manager = analysis + .languages + .iter() + .find(|l| l.name.to_lowercase() == primary_language.to_lowercase()) + .and_then(|l| l.package_manager.as_deref()) + .map(PackageManager::from) + .or_else(|| { + analysis + .languages + .iter() + .max_by(|a, b| { + a.confidence + .partial_cmp(&b.confidence) + .unwrap_or(std::cmp::Ordering::Equal) + }) + .and_then(|l| l.package_manager.as_deref()) + .map(PackageManager::from) + }) + .unwrap_or(PackageManager::Unknown); + + let lock_file = detect_lock_file(&analysis.project_root, &package_manager); + + // ── Test framework ──────────────────────────────────────────────────── + // Filter to frameworks belonging to the primary language so a Vitest + // detection in a companion sub-project does not shadow `cargo test`. + let test_framework = analysis + .technologies + .iter() + .filter(|t| t.category == TechnologyCategory::Testing) + .max_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap_or(std::cmp::Ordering::Equal)) + .map(|t| TestFramework::from(t.name.as_str())) + .filter(|tf| *tf != TestFramework::Unknown) + .filter(|tf| test_framework_matches_language(&primary_language, tf)) + // cargo test is always available even without an explicit tech entry. + .or_else(|| { + if primary_language.to_lowercase() == "rust" { + Some(TestFramework::CargoTest) + } else { + None + } + }); + + // ── Linter ──────────────────────────────────────────────────────────── + // Apply the same root-language filter so a detected eslint from a companion + // project does not suppress clippy for a Rust workspace. 
+ let linter_tech = analysis.technologies.iter().find(|t| { + matches!( + t.name.to_lowercase().as_str(), + "eslint" + | "prettier" + | "pylint" + | "ruff" + | "clippy" + | "golangci-lint" + | "checkstyle" + | "ktlint" + ) + }); + let linter = linter_tech + .map(|t| Linter::from(t.name.as_str())) + .filter(|l| *l != Linter::None) + .filter(|l| linter_matches_language(&primary_language, l)) + // Clippy is always available for Rust projects. + .or_else(|| { + if primary_language.to_lowercase() == "rust" { + Some(Linter::Clippy) + } else { + None + } + }); + + // ── Build command ───────────────────────────────────────────────────── + let build_command = analysis + .build_scripts + .iter() + .find(|s| s.is_default) + .map(|s| s.command.clone()); + + // ── Dockerfile ──────────────────────────────────────────────────────── + let has_dockerfile = analysis.docker_analysis.is_some(); + + // ── Monorepo ────────────────────────────────────────────────────────── + let mono = analyze_monorepo(path)?; + let monorepo = mono.is_monorepo; + let monorepo_packages = if monorepo { + mono.projects + .iter() + .filter_map(|p| { + p.analysis + .project_root + .file_name() + .map(|n| n.to_string_lossy().into_owned()) + }) + .collect() + } else { + Vec::new() + }; + + // ── Git default branch ──────────────────────────────────────────────── + let default_branch = detect_default_branch(path); + + // ── Project name ────────────────────────────────────────────────────── + let project_name = detect_project_name(&analysis); + + Ok(CiContext { + analysis, + primary_language, + runtime_versions, + package_manager, + lock_file, + test_framework, + linter, + build_command, + has_dockerfile, + monorepo, + monorepo_packages, + default_branch, + platform, + format, + project_name, + config_test_command: None, + env_prefix: None, + skip_steps: vec![], + extra_branches: vec![], + }) +} diff --git a/src/generator/ci_generation/coverage_step.rs b/src/generator/ci_generation/coverage_step.rs new file 
mode 100644 index 00000000..a0b99082 --- /dev/null +++ b/src/generator/ci_generation/coverage_step.rs @@ -0,0 +1,248 @@ +//! CI-23 — Code Coverage Upload Step +//! +//! Optional step emitted when the detected test runner produces a coverage +//! report (i.e. `TestStep.coverage_report_path` is `Some(_)`). +//! +//! ## Supported services +//! +//! | Service | YAML emitted | Secret required | +//! |------------------|----------------------------------------------|------------------| +//! | `Codecov` | `codecov/codecov-action@v4` | `CODECOV_TOKEN` | +//! | `InlineSummary` | `github-script` writing to job summary | none | +//! +//! `generate_coverage_step` returns `None` when there is no coverage report +//! path, signalling the template builder to omit the step entirely. +//! When `Codecov` is chosen, `CODECOV_TOKEN` is published as an optional +//! entry in `SECRETS_REQUIRED.md`. + +use crate::generator::ci_generation::schema::TestStep; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Which coverage reporting service to target. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CoverageService { + /// Upload to Codecov using `codecov/codecov-action@v4`. + Codecov, + /// Write coverage numbers inline to the GitHub Actions job summary — + /// no external service, no extra secret. + InlineSummary, +} + +/// A resolved coverage upload step, ready for YAML rendering. +#[derive(Debug, Clone)] +pub struct CoverageStep { + pub service: CoverageService, + /// Path to the coverage report file (relative to workspace root). + pub report_path: String, + /// The secret name that must be configured in the repository when + /// `service == Codecov`. Empty for `InlineSummary`. + pub token_secret: String, +} + +impl CoverageStep { + /// Returns `true` when a repository secret must be configured before the + /// workflow can succeed. 
+ pub fn requires_secret(&self) -> bool { + !self.token_secret.is_empty() + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Generates a coverage step from the detected `TestStep`, choosing the +/// default service (`Codecov`) when a coverage report path is present. +/// +/// Returns `None` when `test.coverage_report_path` is `None`, which instructs +/// the template builder to omit the step. +pub fn generate_coverage_step(test: &TestStep) -> Option { + generate_coverage_step_for(test, CoverageService::Codecov) +} + +/// Same as `generate_coverage_step` but lets the caller choose the service. +/// Primarily used for testing the `InlineSummary` path. +pub fn generate_coverage_step_for( + test: &TestStep, + service: CoverageService, +) -> Option { + let report_path = test.coverage_report_path.as_ref()?.clone(); + let token_secret = match service { + CoverageService::Codecov => "CODECOV_TOKEN".to_string(), + CoverageService::InlineSummary => String::new(), + }; + Some(CoverageStep { service, report_path, token_secret }) +} + +/// Renders the coverage step as a GitHub Actions YAML step snippet. +pub fn render_coverage_yaml(step: &CoverageStep) -> String { + match step.service { + CoverageService::Codecov => format!( + "\ + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: {} + fail_ci_if_error: false + env: + CODECOV_TOKEN: ${{{{ secrets.CODECOV_TOKEN }}}}\n", + step.report_path + ), + + CoverageService::InlineSummary => format!( + "\ + - name: Coverage summary + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const report = fs.existsSync('{}') + ? fs.readFileSync('{}', 'utf8').slice(0, 2000) + : 'Coverage report not found.'; + await core.summary.addRaw('## Coverage\\n```\\n' + report + '\\n```').write();\n", + step.report_path, step.report_path + ), + } +} + +/// Renders the `CODECOV_TOKEN` entry for `SECRETS_REQUIRED.md`. 
+/// Returns an empty string for `InlineSummary` (no secret needed). +pub fn coverage_secrets_doc_entry(step: &CoverageStep) -> String { + if step.service != CoverageService::Codecov { + return String::new(); + } + "\ +### `CODECOV_TOKEN` *(optional)* + +Upload coverage reports to Codecov. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** → your repo → Settings → Repository Token\n" + .to_string() +} + +// ── Tests ────────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::schema::TestStep; + + fn test_step_with_coverage(path: &str) -> TestStep { + TestStep { + command: "cargo test".into(), + coverage_flag: Some("--coverage".into()), + coverage_report_path: Some(path.to_string()), + } + } + + fn test_step_no_coverage() -> TestStep { + TestStep { + command: "cargo test".into(), + coverage_flag: None, + coverage_report_path: None, + } + } + + // ── generate_coverage_step ───────────────────────────────────────── + + #[test] + fn test_returns_none_without_coverage_path() { + assert!(generate_coverage_step(&test_step_no_coverage()).is_none()); + } + + #[test] + fn test_returns_some_with_coverage_path() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")); + assert!(step.is_some()); + } + + #[test] + fn test_defaults_to_codecov_service() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")).unwrap(); + assert_eq!(step.service, CoverageService::Codecov); + } + + #[test] + fn test_codecov_requires_secret() { + let step = generate_coverage_step(&test_step_with_coverage("lcov.info")).unwrap(); + assert!(step.requires_secret()); + assert_eq!(step.token_secret, "CODECOV_TOKEN"); + } + + #[test] + fn test_inline_summary_does_not_require_secret() { + let step = generate_coverage_step_for( + &test_step_with_coverage("lcov.info"), + CoverageService::InlineSummary, + ) + .unwrap(); + 
assert!(!step.requires_secret()); + assert!(step.token_secret.is_empty()); + } + + #[test] + fn test_report_path_preserved() { + let step = + generate_coverage_step(&test_step_with_coverage("target/coverage/lcov.info")).unwrap(); + assert_eq!(step.report_path, "target/coverage/lcov.info"); + } + + // ── render_coverage_yaml ─────────────────────────────────────────── + + #[test] + fn test_codecov_yaml_contains_action() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")).unwrap(); + let yaml = render_coverage_yaml(&step); + assert!(yaml.contains("codecov/codecov-action@v4")); + } + + #[test] + fn test_codecov_yaml_contains_report_path() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")).unwrap(); + let yaml = render_coverage_yaml(&step); + assert!(yaml.contains("coverage.xml")); + } + + #[test] + fn test_codecov_yaml_contains_secret_ref() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")).unwrap(); + let yaml = render_coverage_yaml(&step); + assert!(yaml.contains("CODECOV_TOKEN")); + } + + #[test] + fn test_inline_summary_yaml_uses_github_script() { + let step = generate_coverage_step_for( + &test_step_with_coverage("lcov.info"), + CoverageService::InlineSummary, + ) + .unwrap(); + let yaml = render_coverage_yaml(&step); + assert!(yaml.contains("github-script")); + assert!(yaml.contains("lcov.info")); + assert!(!yaml.contains("CODECOV_TOKEN")); + } + + // ── coverage_secrets_doc_entry ───────────────────────────────────── + + #[test] + fn test_secrets_doc_entry_for_codecov() { + let step = generate_coverage_step(&test_step_with_coverage("coverage.xml")).unwrap(); + let entry = coverage_secrets_doc_entry(&step); + assert!(entry.contains("CODECOV_TOKEN")); + assert!(entry.contains("optional")); + } + + #[test] + fn test_secrets_doc_entry_empty_for_inline() { + let step = generate_coverage_step_for( + &test_step_with_coverage("lcov.info"), + CoverageService::InlineSummary, + ) + 
.unwrap(); + let entry = coverage_secrets_doc_entry(&step); + assert!(entry.is_empty()); + } +} diff --git a/src/generator/ci_generation/docker_step.rs b/src/generator/ci_generation/docker_step.rs new file mode 100644 index 00000000..9106a8a8 --- /dev/null +++ b/src/generator/ci_generation/docker_step.rs @@ -0,0 +1,146 @@ +//! Docker Build & Tag Step Generator — CI-08 +//! +//! Emitted only when `CiContext.has_dockerfile` is true. +//! Produces a `DockerBuildStep` with placeholder tokens for registry and image +//! name that are resolved by the token engine or wired in by the CD generator. + +use crate::cli::CiPlatform; +use crate::generator::ci_generation::{context::CiContext, schema::DockerBuildStep}; + +/// Returns `Some(DockerBuildStep)` when a Dockerfile is present, `None` otherwise. +/// +/// The image tag is built from two unresolved placeholders plus the GitHub +/// Actions expression for the commit SHA, which is always available at runtime: +/// `{{REGISTRY_URL}}/{{IMAGE_NAME}}:${{ github.sha }}` +pub fn generate_docker_step(ctx: &CiContext) -> Option { + if !ctx.has_dockerfile { + return None; + } + + // The commit SHA expression differs per CI platform. 
+ let sha_expr = match ctx.platform { + CiPlatform::Azure => "$(Build.SourceVersion)", + CiPlatform::Gcp => "$SHORT_SHA", + _ => "${{ github.sha }}", + }; + Some(DockerBuildStep { + image_tag: format!("{{{{REGISTRY_URL}}}}/{{{{IMAGE_NAME}}}}:{sha_expr}"), + push: false, + qemu: false, + buildx: true, + }) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::CiPlatform; + use crate::generator::ci_generation::{context::CiContext, test_helpers::make_base_ctx}; + use tempfile::TempDir; + + fn ctx_with_dockerfile(has: bool) -> (CiContext, TempDir) { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { has_dockerfile: has, ..make_base_ctx(dir.path(), "") }; + (ctx, dir) + } + + #[test] + fn test_no_dockerfile_returns_none() { + let (ctx, _dir) = ctx_with_dockerfile(false); + assert!(generate_docker_step(&ctx).is_none()); + } + + #[test] + fn test_dockerfile_present_returns_some() { + let (ctx, _dir) = ctx_with_dockerfile(true); + assert!(generate_docker_step(&ctx).is_some()); + } + + #[test] + fn test_image_tag_contains_registry_placeholder() { + let (ctx, _dir) = ctx_with_dockerfile(true); + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.image_tag.contains("{{REGISTRY_URL}}")); + } + + #[test] + fn test_image_tag_contains_image_name_placeholder() { + let (ctx, _dir) = ctx_with_dockerfile(true); + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.image_tag.contains("{{IMAGE_NAME}}")); + } + + #[test] + fn test_image_tag_github_actions_uses_github_sha() { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { + has_dockerfile: true, + platform: CiPlatform::Hetzner, + ..make_base_ctx(dir.path(), "") + }; + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.image_tag.contains("${{ github.sha }}")); + } + + #[test] + fn test_image_tag_azure_uses_build_source_version() { + let dir = TempDir::new().unwrap(); + let ctx = 
CiContext { + has_dockerfile: true, + platform: CiPlatform::Azure, + ..make_base_ctx(dir.path(), "") + }; + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.image_tag.contains("$(Build.SourceVersion)")); + } + + #[test] + fn test_image_tag_gcp_uses_short_sha() { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { + has_dockerfile: true, + platform: CiPlatform::Gcp, + ..make_base_ctx(dir.path(), "") + }; + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.image_tag.contains("$SHORT_SHA")); + } + + #[test] + fn test_push_defaults_to_false() { + let (ctx, _dir) = ctx_with_dockerfile(true); + let step = generate_docker_step(&ctx).unwrap(); + assert!(!step.push); + } + + #[test] + fn test_buildx_defaults_to_true() { + let (ctx, _dir) = ctx_with_dockerfile(true); + let step = generate_docker_step(&ctx).unwrap(); + assert!(step.buildx); + } + + #[test] + fn test_qemu_defaults_to_false() { + let (ctx, _dir) = ctx_with_dockerfile(true); + let step = generate_docker_step(&ctx).unwrap(); + assert!(!step.qemu); + } + + #[test] + fn test_full_image_tag_format_hetzner() { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { + has_dockerfile: true, + platform: CiPlatform::Hetzner, + ..make_base_ctx(dir.path(), "") + }; + let step = generate_docker_step(&ctx).unwrap(); + assert_eq!( + step.image_tag, + "{{REGISTRY_URL}}/{{IMAGE_NAME}}:${{ github.sha }}" + ); + } +} diff --git a/src/generator/ci_generation/dry_run.rs b/src/generator/ci_generation/dry_run.rs new file mode 100644 index 00000000..d464450b --- /dev/null +++ b/src/generator/ci_generation/dry_run.rs @@ -0,0 +1,232 @@ +//! CI-21 — Dry-Run & Pretty-Print Mode +//! +//! Renders all generated CI files and metadata to a `String` without touching +//! the filesystem. The handler calls `print_dry_run` which delegates to +//! `render_dry_run` — keeping the rendering logic pure and testable. +//! +//! ## Output sections +//! +//! 1. **Header** — banner stating no files will be written. 
+//! 2. **File blocks** — for each `CiFile`: the would-create path, then the +//! full content surrounded by faint separators. +//! 3. **Unresolved token table** — only emitted when tokens remain. +//! 4. **Summary line** — N files, M tokens unresolved. + +use std::path::Path; + +use colored::Colorize; + +use crate::generator::ci_generation::{ + schema::CiPipeline, + writer::{resolve_path, CiFile}, +}; + +// ── Public API ───────────────────────────────────────────────────────────────── + +/// Renders the dry-run output and prints it to stdout. +pub fn print_dry_run(files: &[CiFile], pipeline: &CiPipeline, output_dir: &Path) { + print!("{}", render_dry_run(files, pipeline, output_dir)); +} + +/// Renders the dry-run output to a `String`. +/// +/// Pure function — no I/O, fully testable. +pub fn render_dry_run(files: &[CiFile], pipeline: &CiPipeline, output_dir: &Path) -> String { + let mut out = String::new(); + + // ── Header ──────────────────────────────────────────────────────────── + out.push_str(&format!( + "\n{}\n{}\n{}\n\n", + "╭─ Dry Run ─ no files will be written ─────────────────────────────╮" + .bright_cyan() + .bold(), + format!( + "│ {} file{} would be generated │", + files.len(), + if files.len() == 1 { "" } else { "s" } + ) + .bright_cyan(), + "╰───────────────────────────────────────────────────────────────────╯" + .bright_cyan() + .bold(), + )); + + // ── File blocks ─────────────────────────────────────────────────────── + let sep = "─".repeat(68); + for file in files { + let path = resolve_path(output_dir, &file.kind); + out.push_str(&format!( + " {} {}\n", + "Would create:".dimmed(), + path.display().to_string().cyan().bold(), + )); + out.push_str(&format!("{}\n", sep.dimmed())); + out.push_str(&file.content); + if !file.content.ends_with('\n') { + out.push('\n'); + } + out.push_str(&format!("{}\n\n", sep.dimmed())); + } + + // ── Unresolved token table ──────────────────────────────────────────── + if !pipeline.unresolved_tokens.is_empty() { + 
out.push_str(&format!( + "{}\n", + "╭─ Unresolved Tokens ───────────────────────────────────────────────╮" + .yellow() + .bold() + )); + out.push_str(&format!( + "│ {:<28} {:<20} {}\n", + "Token".yellow().bold(), + "Placeholder".yellow().bold(), + "Hint".yellow().bold(), + )); + out.push_str(&format!("│ {}\n", "─".repeat(64).dimmed())); + + for token in &pipeline.unresolved_tokens { + out.push_str(&format!( + "│ {:<28} {:<20} {}\n", + token.name.as_str().bright_white(), + token.placeholder.as_str().bright_yellow(), + token.hint.as_str().dimmed(), + )); + } + out.push_str(&format!( + "{}\n\n", + "╰───────────────────────────────────────────────────────────────────╯" + .yellow() + .bold() + )); + } + + // ── Summary line ────────────────────────────────────────────────────── + let token_count = pipeline.unresolved_tokens.len(); + let summary = format!( + " {} {} file{} to write • {} unresolved token{}", + "→".bright_cyan(), + files.len(), + if files.len() == 1 { "" } else { "s" }, + token_count, + if token_count == 1 { "" } else { "s" }, + ); + out.push_str(&format!("{}\n\n", summary)); + + out +} + +// ── Tests ────────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{CiFormat, CiPlatform}; + use crate::generator::ci_generation::{ + schema::{ + ArtifactStep, BuildStep, CacheStep, CiPipeline, DockerBuildStep, ImageScanStep, + InstallStep, LintStep, RuntimeStep, SecretScanStep, TestStep, TriggerConfig, + UnresolvedToken, + }, + writer::CiFileKind, + }; + + fn make_pipeline(unresolved: Vec) -> CiPipeline { + CiPipeline { + project_name: "test-project".into(), + platform: CiPlatform::Hetzner, + format: CiFormat::GithubActions, + triggers: TriggerConfig { + push_branches: vec!["main".into()], + pr_branches: vec!["main".into()], + tag_pattern: None, + scheduled: None, + }, + runtime: RuntimeStep { + action: "actions/setup-node@v4".into(), + version: "20".into(), + }, + cache: None, + install: 
InstallStep { command: "npm ci".into() },
+            lint: None,
+            test: TestStep {
+                command: "npm test".into(),
+                coverage_flag: None,
+                coverage_report_path: None,
+            },
+            build: None,
+            docker_build: None,
+            image_scan: None,
+            secret_scan: SecretScanStep {
+                github_token_expr: "${{ secrets.GITHUB_TOKEN }}".into(),
+                gitleaks_license_secret: None,
+            },
+            upload_artifact: None,
+            unresolved_tokens: unresolved,
+        }
+    }
+
+    const YAML: &str = "name: CI\non:\n push:\n branches: [main]\n";
+
+    fn make_files() -> Vec<CiFile> {
+        vec![CiFile::pipeline(YAML.to_string(), CiFormat::GithubActions)]
+    }
+
+    #[test]
+    fn test_render_contains_would_create_path() {
+        let rendered = render_dry_run(&make_files(), &make_pipeline(vec![]), Path::new("/proj"));
+        assert!(rendered.contains("Would create:") || rendered.contains("would create:") || rendered.contains(".github/workflows/ci.yml"));
+    }
+
+    #[test]
+    fn test_render_contains_file_content() {
+        let rendered = render_dry_run(&make_files(), &make_pipeline(vec![]), Path::new("/proj"));
+        assert!(rendered.contains("name: CI"));
+    }
+
+    #[test]
+    fn test_render_no_tokens_section_when_all_resolved() {
+        let rendered = render_dry_run(&make_files(), &make_pipeline(vec![]), Path::new("/proj"));
+        assert!(!rendered.contains("Unresolved Tokens"));
+    }
+
+    #[test]
+    fn test_render_shows_token_table_when_unresolved() {
+        let tokens = vec![
+            UnresolvedToken::new("REGISTRY_URL", "Your container registry base URL", "url"),
+        ];
+        let rendered = render_dry_run(&make_files(), &make_pipeline(tokens), Path::new("/proj"));
+        assert!(rendered.contains("Unresolved Tokens"));
+        assert!(rendered.contains("REGISTRY_URL"));
+        assert!(rendered.contains("{{REGISTRY_URL}}"));
+    }
+
+    #[test]
+    fn test_render_summary_counts_files() {
+        let rendered = render_dry_run(&make_files(), &make_pipeline(vec![]), Path::new("/proj"));
+        assert!(rendered.contains("1 file"));
+    }
+
+    #[test]
+    fn test_render_multiple_files() {
+        let files = vec![
+            CiFile::pipeline(YAML.to_string(),
CiFormat::GithubActions), + CiFile::secrets_doc("# Secrets\n".to_string()), + ]; + let rendered = render_dry_run(&files, &make_pipeline(vec![]), Path::new("/proj")); + assert!(rendered.contains("2 files")); + assert!(rendered.contains("SECRETS_REQUIRED.md")); + } + + #[test] + fn test_render_zero_unresolved_label() { + let rendered = render_dry_run(&make_files(), &make_pipeline(vec![]), Path::new("/proj")); + assert!(rendered.contains("0 unresolved tokens")); + } + + #[test] + fn test_render_singular_token_label() { + let tokens = vec![UnresolvedToken::new("FOO", "hint", "string")]; + let rendered = render_dry_run(&make_files(), &make_pipeline(tokens), Path::new("/proj")); + assert!(rendered.contains("1 unresolved token")); + } +} diff --git a/src/generator/ci_generation/image_scan_step.rs b/src/generator/ci_generation/image_scan_step.rs new file mode 100644 index 00000000..45a3f069 --- /dev/null +++ b/src/generator/ci_generation/image_scan_step.rs @@ -0,0 +1,96 @@ +//! Container Image Security Scan Step Generator — CI-09 +//! +//! Emitted only when a Docker build step is present. Takes the output of +//! `generate_docker_step` directly — the dependency is encoded in the type: +//! `Option` in, `Option` out. + +use crate::generator::ci_generation::schema::{DockerBuildStep, ImageScanStep}; + +/// Returns `Some(ImageScanStep)` when a Docker build step exists, `None` otherwise. +/// +/// The scan targets the same image reference produced by the Docker build step, +/// failing the job on any CRITICAL or HIGH severity finding. 
+pub fn generate_image_scan_step(docker: &Option<DockerBuildStep>) -> Option<ImageScanStep> {
+    docker.as_ref().map(|d| ImageScanStep {
+        image_ref: d.image_tag.clone(),
+        fail_on_severity: "CRITICAL,HIGH".to_string(),
+        format: "sarif".to_string(),
+        output: "trivy-results.sarif".to_string(),
+        upload_sarif: true,
+    })
+}
+
+// ── Tests ─────────────────────────────────────────────────────────────────────
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::generator::ci_generation::schema::DockerBuildStep;
+
+    fn make_docker_step() -> DockerBuildStep {
+        DockerBuildStep {
+            image_tag: "{{REGISTRY_URL}}/{{IMAGE_NAME}}:${{ github.sha }}".to_string(),
+            push: false,
+            qemu: false,
+            buildx: true,
+        }
+    }
+
+    #[test]
+    fn test_none_docker_yields_none_scan() {
+        assert!(generate_image_scan_step(&None).is_none());
+    }
+
+    #[test]
+    fn test_some_docker_yields_some_scan() {
+        let docker = Some(make_docker_step());
+        assert!(generate_image_scan_step(&docker).is_some());
+    }
+
+    #[test]
+    fn test_image_ref_matches_docker_tag() {
+        let docker = Some(make_docker_step());
+        let scan = generate_image_scan_step(&docker).unwrap();
+        assert_eq!(scan.image_ref, make_docker_step().image_tag);
+    }
+
+    #[test]
+    fn test_fail_on_severity_is_critical_and_high() {
+        let docker = Some(make_docker_step());
+        let scan = generate_image_scan_step(&docker).unwrap();
+        assert_eq!(scan.fail_on_severity, "CRITICAL,HIGH");
+    }
+
+    #[test]
+    fn test_format_is_sarif() {
+        let docker = Some(make_docker_step());
+        let scan = generate_image_scan_step(&docker).unwrap();
+        assert_eq!(scan.format, "sarif");
+    }
+
+    #[test]
+    fn test_output_is_trivy_sarif_file() {
+        let docker = Some(make_docker_step());
+        let scan = generate_image_scan_step(&docker).unwrap();
+        assert_eq!(scan.output, "trivy-results.sarif");
+    }
+
+    #[test]
+    fn test_upload_sarif_is_true() {
+        let docker = Some(make_docker_step());
+        let scan = generate_image_scan_step(&docker).unwrap();
+        assert!(scan.upload_sarif);
+    }
+
+    #[test]
+    fn 
test_custom_image_tag_propagated() { + let docker = Some(DockerBuildStep { + image_tag: "ghcr.io/myorg/myapp:abc123".to_string(), + push: true, + qemu: false, + buildx: true, + }); + let scan = generate_image_scan_step(&docker).unwrap(); + assert_eq!(scan.image_ref, "ghcr.io/myorg/myapp:abc123"); + } +} diff --git a/src/generator/ci_generation/lint_step.rs b/src/generator/ci_generation/lint_step.rs new file mode 100644 index 00000000..6f6c946d --- /dev/null +++ b/src/generator/ci_generation/lint_step.rs @@ -0,0 +1,132 @@ +//! Lint Step Generator — CI-06 +//! +//! Maps the detected `Linter` to the correct `LintStep` command. +//! Returns `None` when no linter is detected — the lint step is entirely +//! optional in the CI pipeline model. + +use crate::generator::ci_generation::{ + context::{CiContext, Linter, PackageManager}, + schema::LintStep, +}; + +/// Generates the lint invocation step, or `None` if no linter is detected. +pub fn generate_lint_step(ctx: &CiContext) -> Option { + let command = match &ctx.linter { + Some(Linter::Eslint) => "npx eslint .", + Some(Linter::Prettier) => "npx prettier --check .", + Some(Linter::Pylint) => "pylint src/", + Some(Linter::Ruff) => "ruff check .", + Some(Linter::Clippy) => "cargo clippy -- -D warnings", + Some(Linter::GolangciLint) => "golangci-lint run", + Some(Linter::Checkstyle) => { + if matches!(ctx.package_manager, PackageManager::Gradle) { + "./gradlew checkstyleMain" + } else { + "mvn checkstyle:check" + } + } + Some(Linter::Ktlint) => "ktlint", + Some(Linter::None) | None => return None, + }; + + Some(LintStep { command: command.to_string() }) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::{ + context::{CiContext, PackageManager}, + test_helpers::make_base_ctx, + }; + use tempfile::TempDir; + + fn ctx_with_linter(linter: Option) -> (CiContext, TempDir) { + let dir = TempDir::new().unwrap(); 
+ let ctx = CiContext { linter, ..make_base_ctx(dir.path(), "") }; + (ctx, dir) + } + + #[test] + fn test_eslint_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Eslint)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "npx eslint ."); + } + + #[test] + fn test_prettier_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Prettier)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "npx prettier --check ."); + } + + #[test] + fn test_pylint_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Pylint)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "pylint src/"); + } + + #[test] + fn test_ruff_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Ruff)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "ruff check ."); + } + + #[test] + fn test_clippy_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Clippy)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "cargo clippy -- -D warnings"); + } + + #[test] + fn test_golangci_lint_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::GolangciLint)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "golangci-lint run"); + } + + #[test] + fn test_checkstyle_maven_command() { + // make_base_ctx defaults to a non-Gradle package manager + let (ctx, _dir) = ctx_with_linter(Some(Linter::Checkstyle)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "mvn checkstyle:check"); + } + + #[test] + fn test_checkstyle_gradle_command() { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { + linter: Some(Linter::Checkstyle), + package_manager: PackageManager::Gradle, + ..make_base_ctx(dir.path(), "") + }; + let step = 
generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "./gradlew checkstyleMain"); + } + + #[test] + fn test_ktlint_command() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::Ktlint)); + let step = generate_lint_step(&ctx).expect("should produce step"); + assert_eq!(step.command, "ktlint"); + } + + #[test] + fn test_no_linter_returns_none() { + let (ctx, _dir) = ctx_with_linter(None); + assert!(generate_lint_step(&ctx).is_none()); + } + + #[test] + fn test_linter_none_variant_returns_none() { + let (ctx, _dir) = ctx_with_linter(Some(Linter::None)); + assert!(generate_lint_step(&ctx).is_none()); + } +} diff --git a/src/generator/ci_generation/matrix.rs b/src/generator/ci_generation/matrix.rs new file mode 100644 index 00000000..e655bb9a --- /dev/null +++ b/src/generator/ci_generation/matrix.rs @@ -0,0 +1,353 @@ +//! CI-17 — Multi-Version Test Matrix Generator +//! +//! Maps a project's declared runtime version range to a concrete list of +//! LTS/stable versions that should be tested, then renders a GitHub Actions +//! `strategy.matrix` YAML fragment. +//! +//! ## Supported languages and version sources +//! +//! | Language | Version source in `CiContext.runtime_versions` | +//! |----------|------------------------------------------------| +//! | Node.js | `engines.node` from package.json (semver range) | +//! | Python | `python_requires` from pyproject.toml / setup.cfg | +//! | Go | `go` directive in go.mod (exact or `~1.x`) | +//! | Rust | `rust-toolchain.toml` channel (`stable`, `1.x`) | +//! | Java | `source_compatibility` / `java.version` in pom.xml | +//! +//! When a version constraint does not match any known LTS, the module falls +//! back to the detected version string as a single-element list, ensuring +//! the matrix is never empty. 
+
+use crate::generator::ci_generation::context::CiContext;
+
+// ── Public types ──────────────────────────────────────────────────────────────
+
+/// A resolved version matrix for a specific language, ready to embed in a
+/// GitHub Actions workflow.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct VersionMatrix {
+    /// Language label (e.g. `"node"`, `"python"`, `"go"`).
+    pub language: String,
+    /// Concrete versions to test (e.g. `["18", "20", "22"]`).
+    pub versions: Vec<String>,
+    /// Rendered `strategy:` YAML block.
+    pub rendered_yaml: String,
+}
+
+// ── Public API ────────────────────────────────────────────────────────────────
+
+/// Returns a `VersionMatrix` for the project's primary language when at least
+/// two distinct LTS versions are identified. Returns `None` for languages
+/// with only a single relevant version or no version information.
+pub fn generate_version_matrix(ctx: &CiContext) -> Option<VersionMatrix> {
+    let lang = ctx.primary_language.to_lowercase();
+    let key = runtime_key(&lang);
+    let constraint = ctx.runtime_versions.get(key).map(|s| s.as_str()).unwrap_or("");
+
+    let versions = expand_versions(&lang, constraint);
+    if versions.len() < 2 {
+        return None;
+    }
+
+    let rendered_yaml = render_matrix_yaml(&lang, &versions);
+    Some(VersionMatrix {
+        language: lang,
+        versions,
+        rendered_yaml,
+    })
+}
+
+/// Expands a version constraint string to a list of concrete LTS / stable
+/// version strings. Exposed for testing.
+pub fn expand_versions(language: &str, constraint: &str) -> Vec<String> {
+    match language {
+        "node" | "node.js" | "javascript" | "typescript" => expand_node(constraint),
+        "python" => expand_python(constraint),
+        "go" => expand_go(constraint),
+        "rust" => expand_rust(constraint),
+        "java" | "kotlin" => expand_java(constraint),
+        _ => {
+            if constraint.is_empty() {
+                vec![]
+            } else {
+                vec![constraint.to_string()]
+            }
+        }
+    }
+}
+
+// ── Language-specific expanders ───────────────────────────────────────────────
+
+/// Node.js LTS versions (even majors ≥ 18 are Active/Maintenance LTS).
+static NODE_LTS: &[&str] = &["18", "20", "22"];
+
+fn expand_node(constraint: &str) -> Vec<String> {
+    if constraint.is_empty() {
+        // No constraint → test all current LTS
+        return NODE_LTS.iter().map(|s| s.to_string()).collect();
+    }
+    let min = parse_semver_lower_bound(constraint).unwrap_or(0);
+    let upper = parse_semver_upper_bound(constraint);
+    NODE_LTS
+        .iter()
+        .filter(|v| {
+            let n: u32 = v.parse().unwrap_or(0);
+            n >= min && upper.map_or(true, |(m, inclusive)| if inclusive { n <= m } else { n < m })
+        })
+        .map(|s| s.to_string())
+        .collect()
+}
+
+/// Python CPython versions currently receiving security / active support.
+static PYTHON_LTS: &[&str] = &["3.10", "3.11", "3.12", "3.13"];
+
+fn expand_python(constraint: &str) -> Vec<String> {
+    if constraint.is_empty() {
+        return PYTHON_LTS.iter().map(|s| s.to_string()).collect();
+    }
+    // `python_requires` is a PEP 440 specifier like `>=3.10,<4`
+    // Extract the minor version from the lower bound (e.g. `>=3.10` → 10).
+    let min_minor = parse_python_lower_minor(constraint).unwrap_or(0);
+    PYTHON_LTS
+        .iter()
+        .filter(|v| {
+            let minor = v.split('.').nth(1).and_then(|s| s.parse::<u32>().ok()).unwrap_or(0);
+            minor >= min_minor
+        })
+        .map(|s| s.to_string())
+        .collect()
+}
+
+/// Go versions — stable series for the last two minor releases.
+static GO_STABLE: &[&str] = &["1.22", "1.23"];
+
+fn expand_go(constraint: &str) -> Vec<String> {
+    if constraint.is_empty() {
+        return GO_STABLE.iter().map(|s| s.to_string()).collect();
+    }
+    // go.mod `go 1.21` means minimum — test that and latest stable
+    let declared = constraint.trim_start_matches("~").trim().to_string();
+    let mut versions: Vec<String> = GO_STABLE
+        .iter()
+        .filter(|&&v| v >= declared.as_str())
+        .map(|s| s.to_string())
+        .collect();
+    // Always include the declared version if it isn't already present
+    if !versions.contains(&declared) && !declared.is_empty() {
+        versions.insert(0, declared);
+    }
+    versions
+}
+
+/// Rust — channels. Meaningful matrix is `stable` + `beta`; `nightly` is
+/// opt-in by convention.
+fn expand_rust(constraint: &str) -> Vec<String> {
+    match constraint.trim() {
+        "stable" | "" => vec!["stable".to_string(), "beta".to_string()],
+        "nightly" => vec!["nightly".to_string()],
+        channel => vec![channel.to_string(), "stable".to_string()],
+    }
+}
+
+/// Java LTS releases currently supported by Adoptium/Temurin.
+static JAVA_LTS: &[&str] = &["17", "21"];
+
+fn expand_java(constraint: &str) -> Vec<String> {
+    if constraint.is_empty() {
+        return JAVA_LTS.iter().map(|s| s.to_string()).collect();
+    }
+    let min = parse_semver_lower_bound(constraint).unwrap_or(0);
+    JAVA_LTS
+        .iter()
+        .filter(|v| v.parse::<u32>().unwrap_or(0) >= min)
+        .map(|s| s.to_string())
+        .collect()
+}
+
+// ── Version constraint parser ─────────────────────────────────────────────────
+
+/// Extracts the lower-bound major (or minor for Python/Go) version from a
+/// semver constraint like `>=18`, `>=18.0.0`, `^18`, `~1.21`.
+fn parse_semver_lower_bound(constraint: &str) -> Option<u32> {
+    // Strip operators and pull the first numeric segment
+    let stripped = constraint
+        .trim_start_matches(|c: char| !c.is_ascii_digit())
+        .split(|c: char| !c.is_ascii_digit() && c != '.')
+        .next()?;
+    // For major-only languages take the first segment; for minor-based (Python,
+    // Go) take the second if the caller normalises to that.
+    stripped.split('.').next()?.parse().ok()
+}
+
+/// Extracts an explicit upper bound from a range like `>=18 <23` or `<23`.
+/// Returns `(bound, inclusive)` where `inclusive = true` for `<=`.
+fn parse_semver_upper_bound(constraint: &str) -> Option<(u32, bool)> {
+    let lt_pos = constraint.find('<')?;
+    let after_lt = &constraint[lt_pos + 1..];
+    let inclusive = after_lt.starts_with('=');
+    let digits = after_lt
+        .trim_start_matches('=')
+        .trim()
+        .split(|c: char| !c.is_ascii_digit())
+        .next()?;
+    digits.parse().ok().map(|n| (n, inclusive))
+}
+
+/// Extracts the minor version from a Python constraint like `>=3.10` → `10`.
+fn parse_python_lower_minor(constraint: &str) -> Option<u32> {
+    // Find first `>=3.X` or `>3.X` pattern and extract the minor
+    let stripped = constraint
+        .trim_start_matches(|c: char| !c.is_ascii_digit())
+        .split(|c: char| c == ',' || c == ' ')
+        .next()?;
+    stripped.split('.').nth(1)?.parse().ok()
+}
+
+/// Maps primary language label to the key used in `CiContext.runtime_versions`.
+fn runtime_key(language: &str) -> &str {
+    match language {
+        "javascript" | "typescript" => "node",
+        other => other,
+    }
+}
+
+// ── Markdown renderer ─────────────────────────────────────────────────────────
+
+fn render_matrix_yaml(language: &str, versions: &[String]) -> String {
+    let matrix_key = match language {
+        "node" | "javascript" | "typescript" => "node-version",
+        "python" => "python-version",
+        "go" => "go-version",
+        "rust" => "toolchain",
+        "java" | "kotlin" => "java-version",
+        _ => "version",
+    };
+
+    let version_list = versions
+        .iter()
+        .map(|v| format!("\"{}\"", v))
+        .collect::<Vec<_>>()
+        .join(", ");
+
+    format!(
+        " strategy:\n matrix:\n {}: [{}]\n fail-fast: false\n",
+        matrix_key, version_list
+    )
+}
+
+// ── Tests ─────────────────────────────────────────────────────────────────────
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::path::Path;
+    use crate::generator::ci_generation::test_helpers::make_base_ctx;
+
+    // ── expand_versions ────────────────────────────────────────────────────
+
+    #[test]
+    fn test_node_no_constraint_returns_all_lts() {
+        let v = expand_versions("node", "");
+        assert_eq!(v, vec!["18", "20", "22"]);
+    }
+
+    #[test]
+    fn test_node_lower_bound_filters() {
+        // >=20 should exclude 18
+        let v = expand_versions("node", ">=20");
+        assert!(!v.contains(&"18".to_string()));
+        assert!(v.contains(&"20".to_string()));
+        assert!(v.contains(&"22".to_string()));
+    }
+
+    #[test]
+    fn test_node_upper_bound_filters() {
+        // >=18 <22 should exclude 22
+        let v = expand_versions("node", ">=18 <22");
+        assert!(v.contains(&"18".to_string()));
+        assert!(v.contains(&"20".to_string()));
+        assert!(!v.contains(&"22".to_string()));
+    }
+
+    #[test]
+    fn test_python_no_constraint_returns_supported() {
+        let v = expand_versions("python", "");
+        assert!(v.contains(&"3.11".to_string()));
+        assert!(v.contains(&"3.12".to_string()));
+    }
+
+    #[test]
+    fn test_go_no_constraint_returns_stable_pair() {
+        let v = expand_versions("go", "");
+ 
assert_eq!(v.len(), 2); + } + + #[test] + fn test_rust_stable_returns_stable_and_beta() { + let v = expand_versions("rust", "stable"); + assert_eq!(v, vec!["stable", "beta"]); + } + + #[test] + fn test_rust_nightly_returns_nightly_only() { + let v = expand_versions("rust", "nightly"); + assert_eq!(v, vec!["nightly"]); + } + + #[test] + fn test_java_no_constraint_returns_lts() { + let v = expand_versions("java", ""); + assert_eq!(v, vec!["17", "21"]); + } + + #[test] + fn test_unknown_language_passthrough() { + let v = expand_versions("cobol", "6.5"); + assert_eq!(v, vec!["6.5"]); + } + + #[test] + fn test_unknown_language_empty_constraint_returns_empty() { + let v = expand_versions("cobol", ""); + assert!(v.is_empty()); + } + + // ── generate_version_matrix ──────────────────────────────────────────── + + #[test] + fn test_returns_none_when_single_version() { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "Python"); + ctx.runtime_versions.insert("python".to_string(), ">=3.13".to_string()); + // Only 3.13 matches >=3.13 in our LTS table → single entry → None + let m = generate_version_matrix(&ctx); + assert!(m.is_none()); + } + + #[test] + fn test_node_matrix_from_context() { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "JavaScript"); + ctx.runtime_versions.insert("node".to_string(), ">=18".to_string()); + let m = generate_version_matrix(&ctx).unwrap(); + assert_eq!(m.language, "javascript"); + assert!(m.versions.len() >= 2); + } + + #[test] + fn test_rendered_yaml_contains_matrix_key() { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "JavaScript"); + ctx.runtime_versions.insert("node".to_string(), ">=18".to_string()); + let m = generate_version_matrix(&ctx).unwrap(); + assert!(m.rendered_yaml.contains("node-version")); + assert!(m.rendered_yaml.contains("fail-fast: false")); + } + + #[test] + fn test_rust_matrix_from_context() { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "Rust"); + ctx.runtime_versions.insert("rust".to_string(), 
"stable".to_string()); + let m = generate_version_matrix(&ctx).unwrap(); + assert!(m.versions.contains(&"stable".to_string())); + assert!(m.versions.contains(&"beta".to_string())); + assert!(m.rendered_yaml.contains("toolchain")); + } +} diff --git a/src/generator/ci_generation/mod.rs b/src/generator/ci_generation/mod.rs new file mode 100644 index 00000000..e1099281 --- /dev/null +++ b/src/generator/ci_generation/mod.rs @@ -0,0 +1,39 @@ +//! CI/CD Pipeline Generation Module +//! +//! Generates CI and CD pipeline skeletons from project analysis. +//! Follows the same analyze → generate → write pattern as the existing +//! Dockerfile and Compose generators. +//! +//! ## Submodules +//! +//! - `context` — `CiContext` struct and context collector (CI-02) +//! - `runtime_resolver` — Runtime version resolver (CI-03) +//! - `cache` — Dependency cache strategy (CI-04) +//! - `schema` — Platform-agnostic `CiPipeline` data model (CI-14) +//! - `templates` — Per-platform YAML assemblers (CI-11, CI-12, CI-13) +//! - `token_resolver` — Two-pass placeholder token engine (CI-15) +//! - `triggers` — Trigger configuration resolver (CI-18) +pub mod build_step; +pub mod cache; +pub mod ci_config; +pub mod context; +pub mod coverage_step; +pub mod dry_run; +pub mod matrix; +pub mod monorepo; +pub mod notify_step; +pub mod secrets_doc; +pub mod docker_step; +pub mod image_scan_step; +pub mod lint_step; +pub mod runtime_resolver; +pub mod secret_scan_step; +pub mod schema; +pub mod templates; +pub mod test_step; +pub mod token_resolver; +pub mod triggers; +pub mod writer; +pub mod pipeline; + +pub mod test_helpers; diff --git a/src/generator/ci_generation/monorepo.rs b/src/generator/ci_generation/monorepo.rs new file mode 100644 index 00000000..cb53605f --- /dev/null +++ b/src/generator/ci_generation/monorepo.rs @@ -0,0 +1,260 @@ +//! CI-16 — Monorepo CI Strategy Generator +//! +//! When `CiContext.monorepo = true` this module generates two GitHub Actions +//! 
job fragments that together implement a path-filtered matrix build:
+//!
+//! 1. `detect-changes` — uses `dorny/paths-filter` to produce a JSON list of
+//! packages whose files changed in the current push/PR.
+//! 2. `ci` (matrix) — depends on `detect-changes`, fans out one runner per
+//! changed package, scoping all steps to that package subdirectory.
+//!
+//! The fragments are returned as YAML strings so the template builders
+//! (CI-11/12/13) can splice them in without knowing the internals of this
+//! module. For non-monorepo projects the public functions return `None`,
+//! which callers treat as "use single-project job structure".
+
+use crate::generator::ci_generation::context::CiContext;
+
+// ── Public types ──────────────────────────────────────────────────────────────
+
+/// Rendered monorepo strategy ready for insertion into a GitHub Actions workflow.
+#[derive(Debug, Clone)]
+pub struct MonorepoStrategy {
+    /// Packages detected in the repository (relative paths from root).
+    pub packages: Vec<String>,
+    /// YAML fragment for the `detect-changes` job.
+    pub detect_job_yaml: String,
+    /// YAML fragment for the matrix `ci` job (references `detect-changes`).
+    pub matrix_job_yaml: String,
+    /// `dorny/paths-filter` filter block — one entry per package.
+    pub filter_config: String,
+}
+
+// ── Public API ────────────────────────────────────────────────────────────────
+
+/// Returns a `MonorepoStrategy` when `ctx.monorepo` is `true` and at least
+/// two packages are present. Returns `None` for single-project repositories
+/// so callers can unconditionally call this and branch on `Option`.
+pub fn generate_monorepo_strategy(ctx: &CiContext) -> Option<MonorepoStrategy> {
+    if !ctx.monorepo || ctx.monorepo_packages.len() < 2 {
+        return None;
+    }
+    let packages = ctx.monorepo_packages.clone();
+    let filter_config = build_filter_config(&packages);
+    let detect_job_yaml = build_detect_job(&filter_config);
+    let matrix_job_yaml = build_matrix_job(ctx, &packages);
+    Some(MonorepoStrategy {
+        packages,
+        detect_job_yaml,
+        matrix_job_yaml,
+        filter_config,
+    })
+}
+
+// ── Internal builders ─────────────────────────────────────────────────────────
+
+/// Builds the `dorny/paths-filter` `filters` block.
+///
+/// Each package gets a filter named after its directory slug — any file
+/// change under that directory triggers the corresponding matrix entry.
+fn build_filter_config(packages: &[String]) -> String {
+    let mut out = String::new();
+    for pkg in packages {
+        let slug = package_slug(pkg);
+        out.push_str(&format!(" {}:\n - '{}/**'\n", slug, pkg));
+    }
+    out
+}
+
+/// Builds the `detect-changes` job YAML fragment.
+fn build_detect_job(filter_config: &str) -> String {
+    format!(
+        r#" detect-changes:
+ runs-on: ubuntu-latest
+ outputs:
+ packages: ${{{{ steps.filter.outputs.changes }}}}
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dorny/paths-filter@v3
+ id: filter
+ with:
+ filters: |
+{}
+"#,
+        filter_config
+    )
+}
+
+/// Builds the matrix `ci` job YAML fragment.
+///
+/// Each matrix value is the package slug; the actual path is reconstructed
+/// inside the job via the `PACKAGE_PATH` env variable derived from the matrix
+/// entry name. This keeps the YAML readable while preserving round-trip
+/// correctness.
+fn build_matrix_job(ctx: &CiContext, packages: &[String]) -> String {
+    let slugs: Vec<String> = packages.iter().map(|p| package_slug(p)).collect();
+    let matrix_list = slugs
+        .iter()
+        .map(|s| format!("\"{}\"", s))
+        .collect::<Vec<_>>()
+        .join(", ");
+
+    let test_cmd = ctx
+        .config_test_command
+        .as_deref()
+        .unwrap_or("{{TEST_COMMAND}}")
+        .to_string();
+    let build_cmd = ctx
+        .build_command
+        .as_deref()
+        .unwrap_or("{{BUILD_COMMAND}}")
+        .to_string();
+
+    format!(
+        r#" ci:
+ needs: detect-changes
+ if: ${{{{ needs.detect-changes.outputs.packages != '[]' }}}}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ package: ${{{{ fromJson(needs.detect-changes.outputs.packages) }}}}
+ fail-fast: false
+ defaults:
+ run:
+ working-directory: ${{{{ matrix.package }}}}
+ steps:
+ - uses: actions/checkout@v4
+
+ # CI-03: runtime + cache scoped to package directory
+ - uses: actions/cache@v4
+ with:
+ path: "{{{{CACHE_PATH}}}}"
+ key: "${{{{ runner.os }}}}-${{{{ matrix.package }}}}-${{{{ hashFiles(format('{{{{LOCK_FILE}}}}') ) }}}}"
+
+ - name: Install dependencies
+ run: "{{{{INSTALL_COMMAND}}}}"
+
+ - name: Test
+ run: {test_cmd}
+
+ - name: Build
+ run: {build_cmd}
+ # Available packages: [{matrix_list}]
+"#
+    )
+}
+
+/// Converts a package path like `packages/api` into a slug `api`, or
+/// `services/auth-service` into `auth-service`. Uses the last path component.
+fn package_slug(path: &str) -> String { + path.trim_end_matches('/') + .split('/') + .last() + .unwrap_or(path) + .replace(|c: char| !c.is_alphanumeric() && c != '-' && c != '_', "-") +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use std::path::Path; + use crate::generator::ci_generation::test_helpers::make_base_ctx; + + fn monorepo_ctx(packages: &[&str]) -> CiContext { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "Rust"); + ctx.monorepo = true; + ctx.monorepo_packages = packages.iter().map(|s| s.to_string()).collect(); + ctx + } + + #[test] + fn test_returns_none_for_single_project() { + let ctx = make_base_ctx(Path::new("/tmp/test"), "Rust"); + assert!(generate_monorepo_strategy(&ctx).is_none()); + } + + #[test] + fn test_returns_none_when_monorepo_flag_false() { + let mut ctx = make_base_ctx(Path::new("/tmp/test"), "Rust"); + ctx.monorepo = true; + ctx.monorepo_packages = vec!["packages/api".to_string()]; // only one + assert!(generate_monorepo_strategy(&ctx).is_none()); + } + + #[test] + fn test_returns_strategy_for_two_packages() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let strategy = generate_monorepo_strategy(&ctx).unwrap(); + assert_eq!(strategy.packages.len(), 2); + } + + #[test] + fn test_detect_job_contains_dorny_filter() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.detect_job_yaml.contains("dorny/paths-filter")); + } + + #[test] + fn test_detect_job_outputs_packages() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.detect_job_yaml.contains("packages:")); + assert!(s.detect_job_yaml.contains("outputs.changes")); + } + + #[test] + fn test_filter_config_covers_each_package() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = 
generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.filter_config.contains("api:")); + assert!(s.filter_config.contains("web:")); + assert!(s.filter_config.contains("packages/api/**")); + assert!(s.filter_config.contains("packages/web/**")); + } + + #[test] + fn test_matrix_job_needs_detect_changes() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.matrix_job_yaml.contains("needs: detect-changes")); + } + + #[test] + fn test_matrix_job_uses_fail_fast_false() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.matrix_job_yaml.contains("fail-fast: false")); + } + + #[test] + fn test_matrix_job_working_directory() { + let ctx = monorepo_ctx(&["packages/api", "packages/web"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.matrix_job_yaml.contains("working-directory:")); + assert!(s.matrix_job_yaml.contains("matrix.package")); + } + + #[test] + fn test_package_slug_last_component() { + assert_eq!(package_slug("packages/api"), "api"); + assert_eq!(package_slug("services/auth-service"), "auth-service"); + assert_eq!(package_slug("web"), "web"); + } + + #[test] + fn test_package_slug_strips_trailing_slash() { + assert_eq!(package_slug("packages/api/"), "api"); + } + + #[test] + fn test_three_packages_all_appear_in_matrix() { + let ctx = monorepo_ctx(&["packages/api", "packages/web", "packages/worker"]); + let s = generate_monorepo_strategy(&ctx).unwrap(); + assert!(s.matrix_job_yaml.contains("\"api\"")); + assert!(s.matrix_job_yaml.contains("\"web\"")); + assert!(s.matrix_job_yaml.contains("\"worker\"")); + } +} diff --git a/src/generator/ci_generation/notify_step.rs b/src/generator/ci_generation/notify_step.rs new file mode 100644 index 00000000..4580f063 --- /dev/null +++ b/src/generator/ci_generation/notify_step.rs @@ -0,0 +1,208 @@ +//! CI-24 — Notification Step (CI Failure) +//! +//! 
Optional step emitted when `--notify` is passed on the CLI or `notify = +//! true` is set in `.syncable.ci.toml`. The rendered step fires only on job +//! failure (`if: failure()`) and requires two repository secrets. +//! +//! ## Generated YAML (GitHub Actions) +//! +//! ```yaml +//! - name: Notify on failure +//! if: failure() +//! uses: slackapi/slack-github-action@v2 +//! with: +//! channel-id: ${{ secrets.SLACK_CHANNEL_ID }} +//! slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }} +//! payload: | +//! {"text": "āŒ CI failed on `${{ github.ref_name }}` — ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"} +//! ``` +//! +//! Both `SLACK_BOT_TOKEN` and `SLACK_CHANNEL_ID` are appended as *optional* +//! entries in `SECRETS_REQUIRED.md` so the user knows exactly where to +//! configure them. + +// ── Public types ────────────────────────────────────────────────────────────── + +/// A resolved Slack notification step, ready for YAML rendering. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct NotifyStep { + /// Repository secret name for the Slack bot token. + pub token_secret: String, + /// Repository secret name for the Slack channel ID. + pub channel_secret: String, +} + +impl Default for NotifyStep { + fn default() -> Self { + Self { + token_secret: "SLACK_BOT_TOKEN".to_string(), + channel_secret: "SLACK_CHANNEL_ID".to_string(), + } + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Returns `Some(NotifyStep)` when `enabled` is true, `None` otherwise. +/// +/// Template builders call this with the resolved `notify` flag so the step +/// is omitted from YAML when notifications are not requested. +pub fn generate_notify_step(enabled: bool) -> Option<NotifyStep> { + if enabled { Some(NotifyStep::default()) } else { None } +} + +/// Renders the notify step as a GitHub Actions YAML step snippet.
+/// +/// The step is conditionally gated with `if: failure()` and references both +/// secrets via `${{ secrets.* }}` expressions so no secret values appear in +/// the generated file. +pub fn render_notify_yaml(step: &NotifyStep) -> String { + format!( + "\ + - name: Notify on failure + if: failure() + uses: slackapi/slack-github-action@v2 + with: + channel-id: ${{{{ secrets.{channel} }}}} + slack-bot-token: ${{{{ secrets.{token} }}}} + payload: | + {{\"text\": \"\\u274c CI failed on `${{{{ github.ref_name }}}}` \\u2014 ${{{{ github.server_url }}}}/${{{{ github.repository }}}}/actions/runs/${{{{ github.run_id }}}}\"}}\n", + channel = step.channel_secret, + token = step.token_secret, + ) +} + +/// Renders the `SLACK_BOT_TOKEN` and `SLACK_CHANNEL_ID` entries for +/// `SECRETS_REQUIRED.md`. +pub fn notify_secrets_doc_entries(step: &NotifyStep) -> String { + format!( + "\ +### `{token}` *(optional)* + +Slack bot OAuth token used by the CI failure notification step. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** <https://api.slack.com/apps> → your app → OAuth & Permissions → Bot User OAuth Token + +--- + +### `{channel}` *(optional)* + +Slack channel ID that receives CI failure notifications. + +**Where to set:** Repository → Settings → Secrets and variables → Actions + +**How to obtain:** Right-click a channel in Slack → Copy link — the ID is the last path segment (e.g.
`C0123ABCDEF`).\n", + token = step.token_secret, + channel = step.channel_secret, + ) +} + +// ── Tests ────────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── generate_notify_step ────────────────────────────────────────── + + #[test] + fn test_returns_none_when_disabled() { + assert!(generate_notify_step(false).is_none()); + } + + #[test] + fn test_returns_some_when_enabled() { + assert!(generate_notify_step(true).is_some()); + } + + #[test] + fn test_default_secret_names() { + let step = generate_notify_step(true).unwrap(); + assert_eq!(step.token_secret, "SLACK_BOT_TOKEN"); + assert_eq!(step.channel_secret, "SLACK_CHANNEL_ID"); + } + + // ── render_notify_yaml ──────────────────────────────────────────── + + #[test] + fn test_yaml_contains_action_reference() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("slackapi/slack-github-action@v2")); + } + + #[test] + fn test_yaml_gated_on_failure() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("if: failure()")); + } + + #[test] + fn test_yaml_references_channel_secret() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("SLACK_CHANNEL_ID")); + } + + #[test] + fn test_yaml_references_token_secret() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("SLACK_BOT_TOKEN")); + } + + #[test] + fn test_yaml_contains_payload_with_run_id() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("github.run_id")); + } + + #[test] + fn test_yaml_no_hardcoded_secret_values() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + // Ensure secrets are referenced, not embedded + 
assert!(!yaml.contains("xoxb-")); + assert!(!yaml.contains("xapp-")); + } + + #[test] + fn test_custom_secret_names_propagated() { + let step = NotifyStep { + token_secret: "MY_SLACK_TOKEN".to_string(), + channel_secret: "MY_SLACK_CHANNEL".to_string(), + }; + let yaml = render_notify_yaml(&step); + assert!(yaml.contains("MY_SLACK_TOKEN")); + assert!(yaml.contains("MY_SLACK_CHANNEL")); + assert!(!yaml.contains("SLACK_BOT_TOKEN")); + } + + // ── notify_secrets_doc_entries ──────────────────────────────────── + + #[test] + fn test_secrets_doc_contains_both_secrets() { + let step = generate_notify_step(true).unwrap(); + let doc = notify_secrets_doc_entries(&step); + assert!(doc.contains("SLACK_BOT_TOKEN")); + assert!(doc.contains("SLACK_CHANNEL_ID")); + } + + #[test] + fn test_secrets_doc_marks_both_as_optional() { + let step = generate_notify_step(true).unwrap(); + let doc = notify_secrets_doc_entries(&step); + assert_eq!(doc.matches("optional").count(), 2); + } + + #[test] + fn test_secrets_doc_includes_setup_instructions() { + let step = generate_notify_step(true).unwrap(); + let doc = notify_secrets_doc_entries(&step); + assert!(doc.contains("api.slack.com")); + } +} diff --git a/src/generator/ci_generation/pipeline.rs b/src/generator/ci_generation/pipeline.rs new file mode 100644 index 00000000..41eb632d --- /dev/null +++ b/src/generator/ci_generation/pipeline.rs @@ -0,0 +1,159 @@ +//! CI Pipeline Orchestrator — CI-01 (wiring) +//! +//! `build_ci_pipeline` is the single entry point that assembles a complete +//! `CiPipeline` from a `CiContext`. It calls every step-generator module in +//! canonical order and collects unresolved tokens from each. 
+ +use crate::generator::ci_generation::{ + build_step::generate_build_step, + cache::resolve_cache, + context::{CiContext, PackageManager}, + docker_step::generate_docker_step, + image_scan_step::generate_image_scan_step, + lint_step::generate_lint_step, + runtime_resolver::resolve_runtime, + schema::{ + ArtifactStep, BuildStep, CacheStep, CiPipeline, InstallStep, LintStep, RuntimeStep, + TestStep, UnresolvedToken, + }, + secret_scan_step::generate_secret_scan_step, + test_step::generate_test_step, + triggers::resolve_triggers, +}; + +/// Assembles a complete `CiPipeline` from a collected `CiContext`. +/// +/// When `skip_docker` is `true` the Docker build, image scan, and artifact +/// upload steps are omitted even if a Dockerfile is present. +pub fn build_ci_pipeline(ctx: &CiContext, skip_docker: bool) -> CiPipeline { + let mut unresolved: Vec = Vec::new(); + + // ── Triggers ────────────────────────────────────────────────────────── + let triggers = resolve_triggers(ctx); + + // ── Runtime / toolchain ─────────────────────────────────────────────── + let runtime_setup = resolve_runtime(ctx); + for token_name in &runtime_setup.unresolved_tokens { + unresolved.push(UnresolvedToken::new( + token_name, + "Runtime version — check your version file or CI requirements", + "string", + )); + } + let runtime = RuntimeStep { + action: runtime_setup.action.to_string(), + version: runtime_setup.version, + }; + + // ── Cache ───────────────────────────────────────────────────────────── + let cache = resolve_cache(ctx).map(|c| CacheStep { + paths: c.paths, + key: c.key, + restore_keys: c.restore_keys, + }); + + // ── Install ─────────────────────────────────────────────────────────── + let install = InstallStep { command: install_command(&ctx.package_manager) }; + + // ── Lint ────────────────────────────────────────────────────────────── + let lint = generate_lint_step(ctx).map(|l| LintStep { command: l.command }); + + // ── Test 
────────────────────────────────────────────────────────────── + let test_step_raw = generate_test_step(ctx); + if test_step_raw.command.contains("{{TEST_COMMAND}}") { + unresolved.push(UnresolvedToken::new( + "TEST_COMMAND", + "Command to run your test suite", + "string", + )); + } + let test = TestStep { + command: test_step_raw.command, + coverage_flag: test_step_raw.coverage_flag, + coverage_report_path: test_step_raw.coverage_report_path, + }; + + // ── Build ───────────────────────────────────────────────────────────── + let build = generate_build_step(ctx).map(|b| { + if b.command.contains("{{BUILD_COMMAND}}") { + unresolved.push(UnresolvedToken::new( + "BUILD_COMMAND", + "Command to compile or bundle your project", + "string", + )); + } + BuildStep { command: b.command, artifact_path: b.artifact_path } + }); + + // ── Docker & image scan ─────────────────────────────────────────────── + let (docker_build, image_scan) = if skip_docker { + (None, None) + } else { + let d = generate_docker_step(ctx); + if let Some(ref ds) = d { + if ds.image_tag.contains("{{REGISTRY_URL}}") || ds.image_tag.contains("{{IMAGE_NAME}}") { + unresolved.push(UnresolvedToken::new( + "REGISTRY_URL", + "Container registry URL e.g. ghcr.io/org/repo", + "url", + )); + unresolved.push(UnresolvedToken::new( + "IMAGE_NAME", + "Image name e.g. 
my-app", + "string", + )); + } + } + let scan = generate_image_scan_step(&d); + (d, scan) + }; + + // ── Secret scan ─────────────────────────────────────────────────────── + let secret_scan = generate_secret_scan_step(); + + // ── Artifact upload ─────────────────────────────────────────────────── + let upload_artifact = build.as_ref().and_then(|b| { + b.artifact_path.as_ref().map(|path| ArtifactStep { + name: ctx.project_name.clone(), + path: path.clone(), + }) + }); + + CiPipeline { + project_name: ctx.project_name.clone(), + platform: ctx.platform.clone(), + format: ctx.format.clone(), + triggers, + runtime, + cache, + install, + lint, + test, + build, + docker_build, + image_scan, + secret_scan, + upload_artifact, + unresolved_tokens: unresolved, + } +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Maps a `PackageManager` to its standard install command. +fn install_command(pm: &PackageManager) -> String { + match pm { + PackageManager::Npm => "npm ci".to_string(), + PackageManager::Yarn => "yarn install --frozen-lockfile".to_string(), + PackageManager::Pnpm => "pnpm install --frozen-lockfile".to_string(), + PackageManager::Bun => "bun install".to_string(), + PackageManager::Pip => "pip install -r requirements.txt".to_string(), + PackageManager::Poetry => "poetry install --no-interaction".to_string(), + PackageManager::Uv => "uv sync".to_string(), + PackageManager::Cargo => "cargo fetch".to_string(), + PackageManager::GoMod => "go mod download".to_string(), + PackageManager::Maven => "mvn dependency:resolve -q".to_string(), + PackageManager::Gradle => "./gradlew dependencies --quiet".to_string(), + PackageManager::Unknown => "{{INSTALL_COMMAND}}".to_string(), + } +} diff --git a/src/generator/ci_generation/runtime_resolver.rs b/src/generator/ci_generation/runtime_resolver.rs new file mode 100644 index 00000000..9adac11c --- /dev/null +++ b/src/generator/ci_generation/runtime_resolver.rs @@ -0,0 +1,327 @@ +//! 
CI-03 — Runtime version resolver. +//! +//! Maps `CiContext.primary_language` to the correct GitHub Actions setup +//! action and version string, reading version files from disk when needed. + +use std::path::Path; + +use serde::Serialize; + +use crate::generator::ci_generation::context::CiContext; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Resolved setup step for the project's primary runtime. +#[derive(Debug, Clone, Serialize)] +pub struct RuntimeSetup { + /// GitHub Actions action identifier, e.g. `"actions/setup-node@v4"`. + pub action: &'static str, + /// Resolved version string, or `"{{RUNTIME_VERSION}}"` when unknown. + pub version: String, + /// Token names that could not be resolved and require manual substitution. + pub unresolved_tokens: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Resolves the runtime setup step from a `CiContext`. +/// +/// Falls back to `{{RUNTIME_VERSION}}` when no version file is found and +/// records the token name in `unresolved_tokens` for downstream warning. 
+pub fn resolve_runtime(ctx: &CiContext) -> RuntimeSetup { + let root = &ctx.analysis.project_root; + let lang = ctx.primary_language.to_lowercase(); + + match lang.as_str() { + "typescript" | "javascript" => resolve_node(root, ctx), + "python" => resolve_python(root, ctx), + "go" => resolve_go(root, ctx), + "rust" => resolve_rust(root), + "java" | "kotlin" => resolve_java(root, ctx), + _ => unresolved("RUNTIME_VERSION"), + } +} + +// ── Language resolvers ──────────────────────────────────────────────────────── + +fn resolve_node(root: &Path, ctx: &CiContext) -> RuntimeSetup { + // Priority: .nvmrc → .node-version → engines.node in package.json → CiContext + let version = read_first_line(root, ".nvmrc") + .or_else(|| read_first_line(root, ".node-version")) + .or_else(|| extract_engines_node(root)) + .or_else(|| ctx.runtime_versions.get("TypeScript").or_else(|| ctx.runtime_versions.get("JavaScript")).cloned()) + .unwrap_or_else(|| "{{RUNTIME_VERSION}}".to_string()); + + make_setup("actions/setup-node@v4", version, "RUNTIME_VERSION") +} + +fn resolve_python(root: &Path, ctx: &CiContext) -> RuntimeSetup { + // Priority: .python-version → pyproject.toml requires-python → Pipfile → CiContext + let version = read_first_line(root, ".python-version") + .or_else(|| extract_pyproject_python(root)) + .or_else(|| extract_pipfile_python(root)) + .or_else(|| ctx.runtime_versions.get("Python").cloned()) + .unwrap_or_else(|| "{{RUNTIME_VERSION}}".to_string()); + + make_setup("actions/setup-python@v5", version, "RUNTIME_VERSION") +} + +fn resolve_go(root: &Path, ctx: &CiContext) -> RuntimeSetup { + // go.mod `go X.YY` directive → CiContext + let version = extract_go_mod(root) + .or_else(|| ctx.runtime_versions.get("Go").cloned()) + .unwrap_or_else(|| "{{RUNTIME_VERSION}}".to_string()); + + make_setup("actions/setup-go@v5", version, "RUNTIME_VERSION") +} + +fn resolve_rust(root: &Path) -> RuntimeSetup { + // rust-toolchain.toml `channel` field → rust-toolchain file → "stable" + let 
version = extract_rust_toolchain(root).unwrap_or_else(|| "stable".to_string()); + RuntimeSetup { + action: "dtolnay/rust-toolchain@master", + version, + unresolved_tokens: Vec::new(), + } +} + +fn resolve_java(root: &Path, ctx: &CiContext) -> RuntimeSetup { + // pom.xml → build.gradle targetCompatibility → CiContext + let version = extract_pom_java_version(root) + .or_else(|| extract_gradle_java_version(root)) + .or_else(|| ctx.runtime_versions.get("Java").or_else(|| ctx.runtime_versions.get("Kotlin")).cloned()) + .unwrap_or_else(|| "{{RUNTIME_VERSION}}".to_string()); + + make_setup("actions/setup-java@v4", version, "RUNTIME_VERSION") +} + +// ── File extraction helpers ─────────────────────────────────────────────────── + +/// Reads the first non-empty, non-comment line from a file. +fn read_first_line(root: &Path, file: &str) -> Option { + let content = std::fs::read_to_string(root.join(file)).ok()?; + content + .lines() + .map(str::trim) + .find(|l| !l.is_empty() && !l.starts_with('#')) + .map(|l| l.trim_start_matches('v').to_string()) +} + +/// Extracts `engines.node` from `package.json` (e.g. `">=18.0.0"` → `"18"`). +fn extract_engines_node(root: &Path) -> Option { + let content = std::fs::read_to_string(root.join("package.json")).ok()?; + let json: serde_json::Value = serde_json::from_str(&content).ok()?; + let raw = json["engines"]["node"].as_str()?.to_string(); + // Strip leading range operators: >=18.0.0 → 18 + let stripped = raw.trim_start_matches(|c: char| !c.is_ascii_digit()); + let major = stripped.split('.').next()?; + Some(major.to_string()) +} + +/// Extracts `requires-python` from `pyproject.toml` (e.g. `">=3.11"` → `"3.11"`). 
+fn extract_pyproject_python(root: &Path) -> Option { + let content = std::fs::read_to_string(root.join("pyproject.toml")).ok()?; + for line in content.lines() { + let line = line.trim(); + if line.starts_with("requires-python") { + let value = line.split('=').nth(1)?.trim().trim_matches('"').trim_matches('\''); + let stripped = value.trim_start_matches(|c: char| !c.is_ascii_digit()); + return Some(stripped.to_string()); + } + } + None +} + +/// Extracts `python_requires` from `Pipfile`. +fn extract_pipfile_python(root: &Path) -> Option { + let content = std::fs::read_to_string(root.join("Pipfile")).ok()?; + for line in content.lines() { + let line = line.trim(); + if line.starts_with("python_version") || line.starts_with("python_full_version") { + let value = line.split('=').nth(1)?.trim().trim_matches('"').trim_matches('\''); + return Some(value.to_string()); + } + } + None +} + +/// Extracts the `go X.YY` directive from `go.mod`. +fn extract_go_mod(root: &Path) -> Option { + let content = std::fs::read_to_string(root.join("go.mod")).ok()?; + for line in content.lines() { + let line = line.trim(); + if line.starts_with("go ") { + return Some(line[3..].trim().to_string()); + } + } + None +} + +/// Extracts `channel` from `rust-toolchain.toml`, or reads a bare `rust-toolchain` file. +fn extract_rust_toolchain(root: &Path) -> Option { + // TOML form + if let Ok(content) = std::fs::read_to_string(root.join("rust-toolchain.toml")) { + for line in content.lines() { + let line = line.trim(); + if line.starts_with("channel") { + let value = line.split('=').nth(1)?.trim().trim_matches('"').trim_matches('\''); + return Some(value.to_string()); + } + } + } + // Legacy single-line form + read_first_line(root, "rust-toolchain") +} + +/// Extracts `` from `pom.xml`. 
+fn extract_pom_java_version(root: &Path) -> Option<String> { + let content = std::fs::read_to_string(root.join("pom.xml")).ok()?; + for line in content.lines() { + let line = line.trim(); + if line.starts_with("<maven.compiler.target>") { + let inner = line + .trim_start_matches("<maven.compiler.target>") + .trim_end_matches("</maven.compiler.target>"); + return Some(inner.to_string()); + } + } + None +} + +/// Extracts `targetCompatibility` or `sourceCompatibility` from `build.gradle`. +fn extract_gradle_java_version(root: &Path) -> Option<String> { + let content = std::fs::read_to_string(root.join("build.gradle")) + .or_else(|_| std::fs::read_to_string(root.join("build.gradle.kts"))) + .ok()?; + for line in content.lines() { + let line = line.trim(); + if line.starts_with("targetCompatibility") || line.starts_with("sourceCompatibility") { + let value = line + .split(['=', ' ']) + .last()? + .trim() + .trim_matches('"') + .trim_matches('\''); + return Some(value.to_string()); + } + } + None +} + +// ── Internal utilities ──────────────────────────────────────────────────────── + +/// Builds a `RuntimeSetup`, recording a token if the version was not resolved. +fn make_setup(action: &'static str, version: String, token: &str) -> RuntimeSetup { + let unresolved_tokens = if version.contains("{{") { + vec![token.to_string()] + } else { + Vec::new() + }; + RuntimeSetup { action, version, unresolved_tokens } +} + +/// Returns an unresolved `RuntimeSetup` for unknown languages.
+fn unresolved(token: &str) -> RuntimeSetup { + RuntimeSetup { + action: "{{SETUP_ACTION}}", + version: format!("{{{{{token}}}}}"), + unresolved_tokens: vec![token.to_string(), "SETUP_ACTION".to_string()], + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + use crate::generator::ci_generation::test_helpers::make_base_ctx; + + fn make_ctx(lang: &str, root: &Path) -> CiContext { + make_base_ctx(root, lang) + } + + #[test] + fn node_nvmrc() { + let dir = TempDir::new().unwrap(); + fs::write(dir.path().join(".nvmrc"), "20.11.0\n").unwrap(); + let ctx = make_ctx("TypeScript", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.action, "actions/setup-node@v4"); + assert_eq!(setup.version, "20.11.0"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn node_no_version_file_emits_placeholder() { + let dir = TempDir::new().unwrap(); + let ctx = make_ctx("JavaScript", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.version, "{{RUNTIME_VERSION}}"); + assert!(setup.unresolved_tokens.contains(&"RUNTIME_VERSION".to_string())); + } + + #[test] + fn python_python_version_file() { + let dir = TempDir::new().unwrap(); + fs::write(dir.path().join(".python-version"), "3.12\n").unwrap(); + let ctx = make_ctx("Python", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.action, "actions/setup-python@v5"); + assert_eq!(setup.version, "3.12"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn go_mod_version() { + let dir = TempDir::new().unwrap(); + fs::write(dir.path().join("go.mod"), "module example.com/app\n\ngo 1.22\n").unwrap(); + let ctx = make_ctx("Go", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.action, "actions/setup-go@v5"); + assert_eq!(setup.version, "1.22"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn 
rust_toolchain_toml() { + let dir = TempDir::new().unwrap(); + fs::write(dir.path().join("rust-toolchain.toml"), "[toolchain]\nchannel = \"1.77\"\n").unwrap(); + let ctx = make_ctx("Rust", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.action, "dtolnay/rust-toolchain@master"); + assert_eq!(setup.version, "1.77"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn rust_no_toolchain_file_defaults_stable() { + let dir = TempDir::new().unwrap(); + let ctx = make_ctx("Rust", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.version, "stable"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn java_pom_xml() { + let dir = TempDir::new().unwrap(); + fs::write(dir.path().join("pom.xml"), "<project>\n<properties>\n<maven.compiler.target>17</maven.compiler.target>\n</properties>\n</project>").unwrap(); + let ctx = make_ctx("Java", dir.path()); + let setup = resolve_runtime(&ctx); + assert_eq!(setup.action, "actions/setup-java@v4"); + assert_eq!(setup.version, "17"); + assert!(setup.unresolved_tokens.is_empty()); + } + + #[test] + fn unknown_language_emits_both_placeholders() { + let dir = TempDir::new().unwrap(); + let ctx = make_ctx("Elixir", dir.path()); + let setup = resolve_runtime(&ctx); + assert!(setup.version.contains("{{")); + assert!(setup.unresolved_tokens.contains(&"SETUP_ACTION".to_string())); + } +} diff --git a/src/generator/ci_generation/schema.rs b/src/generator/ci_generation/schema.rs new file mode 100644 index 00000000..c685b25d --- /dev/null +++ b/src/generator/ci_generation/schema.rs @@ -0,0 +1,179 @@ +//! CI Pipeline Schema — CI-14 +//! +//! Defines the canonical, platform-agnostic `CiPipeline` intermediate +//! representation. All template builders render from this struct, not +//! directly from `CiContext`. This decouples context collection from +//! output formatting and allows future agent patching of individual steps.
+ +use serde::Serialize; + +use crate::cli::{CiFormat, CiPlatform}; + +// ── Unresolved token ────────────────────────────────────────────────────────── + +/// A placeholder that could not be filled deterministically from project files. +/// +/// Serialised into `ci-manifest.toml [unresolved]` so the agent fill phase +/// and interactive prompts know exactly what still needs a human decision. +#[derive(Debug, Clone, Serialize)] +pub struct UnresolvedToken { + /// Token name as it appears in the YAML output, e.g. `"REGISTRY_URL"`. + pub name: String, + /// The `{{TOKEN_NAME}}` string injected into the generated YAML. + pub placeholder: String, + /// Human-readable hint for what value to supply. + pub hint: String, + /// Type annotation used in the manifest file (e.g. `"string"`, `"url"`). + pub token_type: String, +} + +impl UnresolvedToken { + pub fn new(name: &str, hint: &str, token_type: &str) -> Self { + Self { + name: name.to_string(), + placeholder: format!("{{{{{}}}}}", name), + hint: hint.to_string(), + token_type: token_type.to_string(), + } + } +} + +// ── Step structs ────────────────────────────────────────────────────────────── + +/// Trigger events that start the CI workflow. +#[derive(Debug, Clone, Serialize)] +pub struct TriggerConfig { + /// Branches that trigger the workflow on push. + pub push_branches: Vec, + /// Branches that trigger the workflow on pull request. + pub pr_branches: Vec, + /// Optional tag pattern (e.g. `"v*"`) for release triggers. + pub tag_pattern: Option, + /// Optional cron schedule expression. + pub scheduled: Option, +} + +/// Runtime / toolchain setup step. +#[derive(Debug, Clone, Serialize)] +pub struct RuntimeStep { + /// GitHub Actions action identifier, e.g. `"actions/setup-node@v4"`. + pub action: String, + /// Resolved version string or `{{RUNTIME_VERSION}}` placeholder. + pub version: String, +} + +/// Dependency cache step (`actions/cache`). 
+#[derive(Debug, Clone, Serialize)] +pub struct CacheStep { + pub paths: Vec, + pub key: String, + pub restore_keys: Vec, +} + +/// Package install step. +#[derive(Debug, Clone, Serialize)] +pub struct InstallStep { + /// Shell command to install dependencies, e.g. `"npm ci"`. + pub command: String, +} + +/// Lint step — omitted entirely when no linter is detected. +#[derive(Debug, Clone, Serialize)] +pub struct LintStep { + pub command: String, +} + +/// Test step with optional coverage output. +#[derive(Debug, Clone, Serialize)] +pub struct TestStep { + /// Primary test invocation command. + pub command: String, + /// Optional coverage flag appended to the test command. + pub coverage_flag: Option, + /// Relative path to the coverage report file, if produced. + pub coverage_report_path: Option, +} + +/// Build / compile step. +#[derive(Debug, Clone, Serialize)] +pub struct BuildStep { + pub command: String, + /// Relative path to the build output used by the artifact upload step. + pub artifact_path: Option, +} + +/// Docker build and optional push step. +#[derive(Debug, Clone, Serialize)] +pub struct DockerBuildStep { + /// Full image reference including tag, e.g. `"ghcr.io/org/app:${{ github.sha }}"`. + pub image_tag: String, + /// Whether to push the image as part of the CI job. + pub push: bool, + /// Enable multi-platform QEMU cross-compilation via `docker/setup-qemu-action`. + pub qemu: bool, + /// Whether to set up a multi-platform Buildx builder via `docker/setup-buildx-action`. + pub buildx: bool, +} + +/// Container image security scan step (Trivy via `aquasecurity/trivy-action`). +#[derive(Debug, Clone, Serialize)] +pub struct ImageScanStep { + /// Image reference to scan — typically matches `DockerBuildStep.image_tag`. + pub image_ref: String, + /// Comma-separated severity levels that trigger a non-zero exit, e.g. `"CRITICAL,HIGH"`. + pub fail_on_severity: String, + /// Output format for the scan report (`"sarif"`, `"table"`, etc.). 
+ pub format: String, + /// Output file path for the scan report, e.g. `"trivy-results.sarif"`. + pub output: String, + /// Whether to upload the SARIF report to the GitHub Security tab. + pub upload_sarif: bool, +} + +/// Secret / credential leak scan step (Gitleaks via `gitleaks/gitleaks-action@v2`) — always emitted. +#[derive(Debug, Clone, Serialize)] +pub struct SecretScanStep { + /// `${{ secrets.GITHUB_TOKEN }}` — always available in Actions, never a placeholder. + pub github_token_expr: String, + /// Repository secret name for the Gitleaks licence key. + /// `None` for open-source repos (no licence required). + /// `Some("GITLEAKS_LICENSE")` when a private-repo licence is detected or requested. + pub gitleaks_license_secret: Option, +} + +/// Artifact upload step. +#[derive(Debug, Clone, Serialize)] +pub struct ArtifactStep { + /// Display name for the artifact in the GitHub Actions UI. + pub name: String, + /// Path glob for files to upload, e.g. `"dist/**"`. + pub path: String, +} + +// ── Top-level pipeline ──────────────────────────────────────────────────────── + +/// Platform-agnostic intermediate representation of a complete CI pipeline. +/// +/// Template builders (CI-11, CI-12, CI-13) render YAML from this struct. +/// The agent fill phase patches individual fields without re-running full +/// context collection. +#[derive(Debug, Clone, Serialize)] +pub struct CiPipeline { + pub project_name: String, + pub platform: CiPlatform, + pub format: CiFormat, + pub triggers: TriggerConfig, + pub runtime: RuntimeStep, + pub cache: Option, + pub install: InstallStep, + pub lint: Option, + pub test: TestStep, + pub build: Option, + pub docker_build: Option, + pub image_scan: Option, + pub secret_scan: SecretScanStep, + pub upload_artifact: Option, + /// Tokens that could not be resolved deterministically. 
+ pub unresolved_tokens: Vec, +} + diff --git a/src/generator/ci_generation/secret_scan_step.rs b/src/generator/ci_generation/secret_scan_step.rs new file mode 100644 index 00000000..900175d3 --- /dev/null +++ b/src/generator/ci_generation/secret_scan_step.rs @@ -0,0 +1,45 @@ +//! Secret / Credential Leak Scan Step Generator — CI-10 +//! +//! Always emitted regardless of platform or language. Gitleaks runs on the +//! repository checkout — no Docker image or build artifact required. + +use crate::generator::ci_generation::schema::SecretScanStep; + +/// Returns a `SecretScanStep`. Unconditional — every pipeline gets this step. +pub fn generate_secret_scan_step() -> SecretScanStep { + SecretScanStep { + github_token_expr: "${{ secrets.GITHUB_TOKEN }}".to_string(), + gitleaks_license_secret: None, + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_secret_scan_step_is_always_produced() { + let _ = generate_secret_scan_step(); + } + + #[test] + fn test_github_token_is_builtin_expression() { + let step = generate_secret_scan_step(); + assert_eq!(step.github_token_expr, "${{ secrets.GITHUB_TOKEN }}"); + } + + #[test] + fn test_gitleaks_license_defaults_to_none() { + let step = generate_secret_scan_step(); + assert!(step.gitleaks_license_secret.is_none()); + } + + #[test] + fn test_secret_scan_step_serializes() { + let step = generate_secret_scan_step(); + let serialized = serde_json::to_string(&step); + assert!(serialized.is_ok()); + } +} diff --git a/src/generator/ci_generation/secrets_doc.rs b/src/generator/ci_generation/secrets_doc.rs new file mode 100644 index 00000000..d8f5748c --- /dev/null +++ b/src/generator/ci_generation/secrets_doc.rs @@ -0,0 +1,688 @@ +//! CI-19 / DEV-43 — Secrets Inventory Generator +//! +//! Scans a rendered CI pipeline YAML for secret references, deduplicates +//! 
them, and formats a `SECRETS_REQUIRED.md` document that tells the user +//! exactly which repository secrets to create and how to obtain them +//! +//! +use std::collections::{BTreeMap, BTreeSet}; +use std::path::Path; + +use crate::cli::{CiFormat, CiPlatform}; + +// ── Secret metadata ─────────────────────────────────────────────────────────── + +/// A single secret entry in the generated document. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SecretEntry { + pub name: String, + pub description: String, + pub how_to_obtain: String, + pub where_to_set: String, + pub required: bool, +} + +impl SecretEntry { + fn new(name: &str, description: &str, how_to_obtain: &str, where_to_set: &str, required: bool) -> Self { + Self { + name: name.to_string(), + description: description.to_string(), + how_to_obtain: how_to_obtain.to_string(), + where_to_set: where_to_set.to_string(), + required, + } + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Scans `yaml` for secret references and returns a `SECRETS_REQUIRED.md` +/// document body as a `String`. +/// +/// `platform` and `format` are used to emit platform-specific setup instructions +/// and to choose which regex patterns to apply. +pub fn generate_secrets_doc(yaml: &str, platform: CiPlatform, format: CiFormat) -> String { + let names = collect_secret_names(yaml, &format); + let entries = enrich_secrets(names, &platform); + render_markdown(&entries, &platform) +} + +/// Writes `.syncable/SECRETS_REQUIRED.md` to `output_dir`. +/// +/// Creates the `.syncable/` subdirectory if it does not exist. 
+pub fn write_secrets_doc( + yaml: &str, + platform: CiPlatform, + format: CiFormat, + output_dir: &Path, +) -> crate::Result<()> { + let content = generate_secrets_doc(yaml, platform, format); + let syncable_dir = output_dir.join(".syncable"); + std::fs::create_dir_all(&syncable_dir)?; + std::fs::write(syncable_dir.join("SECRETS_REQUIRED.md"), content)?; + Ok(()) +} + +/// Produces a single `SECRETS_REQUIRED.md` covering **all** supplied platforms. +/// +/// Each element of `platforms` is a `(platform, format, rendered_yaml)` triple. +/// The resulting document has one top-level `##` heading per platform so that +/// a user who generates pipelines for multiple targets (or checks this file +/// into the repository for reference) sees a complete, per-platform breakdown +/// of every secret needed. +/// +/// Secrets that appear under multiple platforms are documented independently in +/// each section with platform-appropriate "where to set" instructions. +pub fn generate_secrets_doc_all_platforms( + platforms: &[(CiPlatform, CiFormat, &str)], +) -> String { + if platforms.is_empty() { + return "# Secrets Required\n\nNo pipeline platforms supplied.\n".to_string(); + } + + let mut out = String::new(); + out.push_str("# Secrets Required\n\n"); + out.push_str("Generated by `sync-ctl generate ci`.\n\n"); + + let all_empty = platforms + .iter() + .all(|(_, fmt, yaml)| collect_secret_names(yaml, fmt).is_empty()); + + if all_empty { + out.push_str("No secrets detected in any generated pipeline.\n"); + return out; + } + + out.push_str("---\n\n"); + + for (platform, format, yaml) in platforms { + let names = collect_secret_names(yaml, format); + let platform_label = platform_display_label(platform); + + out.push_str(&format!("## {}\n\n", platform_label)); + + if names.is_empty() { + out.push_str("_No secrets detected for this platform._\n\n"); + continue; + } + + let entries = enrich_secrets(names, platform); + let required: Vec<_> = entries.iter().filter(|e| 
e.required).collect(); + let optional: Vec<_> = entries.iter().filter(|e| !e.required).collect(); + + if !required.is_empty() { + out.push_str("### Required\n\n"); + out.push_str(table_header()); + for e in &required { out.push_str(&table_row(e)); } + out.push('\n'); + } + if !optional.is_empty() { + out.push_str("### Optional\n\n"); + out.push_str(table_header()); + for e in &optional { out.push_str(&table_row(e)); } + out.push('\n'); + } + } + + out +} + +fn platform_display_label(platform: &CiPlatform) -> &'static str { + match platform { + CiPlatform::Azure => "Azure Pipelines", + CiPlatform::Gcp => "GCP Cloud Build", + CiPlatform::Hetzner => "Hetzner / GitHub Actions", + } +} + +/// Returns just the deduplicated set of secret names found in `yaml`. +/// Exposed for testing. +/// +/// Deserializes `yaml` into a `serde_yaml::Value` tree and walks only string +/// scalar nodes, so key names, anchors, and YAML comments never produce false +/// positives. Falls back to scanning the raw string when the YAML is not +/// parseable (e.g. a partially-rendered template with `{{PLACEHOLDER}}` +/// tokens that make the YAML invalid). +pub fn collect_secret_names(yaml: &str, format: &CiFormat) -> BTreeSet { + // Collect all string scalar values from the YAML value tree. If the + // document is not valid YAML (e.g. contains unquoted `{{...}}` tokens) + // fall back to scanning the raw string directly. + let scalars: Vec = match serde_yaml::from_str::(yaml) { + Ok(root) => collect_string_scalars(&root), + Err(_) => vec![yaml.to_string()], + }; + + let mut names = BTreeSet::new(); + for scalar in &scalars { + scan_scalar_for_secrets(scalar, format, &mut names); + } + names +} + +/// Recursively collects every string scalar value from a `serde_yaml::Value` tree. 
+fn collect_string_scalars(value: &serde_yaml::Value) -> Vec { + match value { + serde_yaml::Value::String(s) => vec![s.clone()], + serde_yaml::Value::Sequence(seq) => { + seq.iter().flat_map(collect_string_scalars).collect() + } + serde_yaml::Value::Mapping(map) => { + map.values().flat_map(collect_string_scalars).collect() + } + // Booleans, numbers, null — never contain secret references. + _ => vec![], + } +} + +/// Applies platform-specific patterns against a single string scalar. +fn scan_scalar_for_secrets(scalar: &str, format: &CiFormat, names: &mut BTreeSet) { + use regex::Regex; + + match format { + CiFormat::GithubActions => { + // ${{ secrets.NAME }} + let re = Regex::new(r"\$\{\{\s*secrets\.([A-Z0-9_]+)\s*\}\}").unwrap(); + for cap in re.captures_iter(scalar) { + if let Some(m) = cap.get(1) { names.insert(m.as_str().to_string()); } + } + } + CiFormat::AzurePipelines => { + // $(VARIABLE_NAME) — all-caps names only (lower-case are pipeline vars, not secrets) + let re_paren = Regex::new(r"\$\(([A-Z][A-Z0-9_]+)\)").unwrap(); + for cap in re_paren.captures_iter(scalar) { + if let Some(m) = cap.get(1) { names.insert(m.as_str().to_string()); } + } + // Also catch GH-style secrets accidentally present (mixed templates) + let re_gh = Regex::new(r"\$\{\{\s*secrets\.([A-Z0-9_]+)\s*\}\}").unwrap(); + for cap in re_gh.captures_iter(scalar) { + if let Some(m) = cap.get(1) { names.insert(m.as_str().to_string()); } + } + } + CiFormat::CloudBuild => { + // $$_VARIABLE (double-dollar escape used in some GCP docs) + let re_dd = Regex::new(r"\$\$([_A-Z][A-Z0-9_]*)").unwrap(); + for cap in re_dd.captures_iter(scalar) { + if let Some(m) = cap.get(1) { names.insert(m.as_str().to_string()); } + } + // $_VARIABLE — standard Cloud Build user substitution + let re_sub = Regex::new(r"\$_([A-Z][A-Z0-9_]*)").unwrap(); + for cap in re_sub.captures_iter(scalar) { + if let Some(m) = cap.get(1) { names.insert(m.as_str().to_string()); } + } + } + } +} + +// ── Knowledge base 
──────────────────────────────────────────────────────────── + +/// Builds a map of well-known secret names → `SecretEntry` metadata. +fn known_secrets() -> BTreeMap<&'static str, SecretEntry> { + let mut m = BTreeMap::new(); + + m.insert("GITHUB_TOKEN", SecretEntry::new( + "GITHUB_TOKEN", + "GitHub-issued token for Actions API access. Automatically available in all GitHub Actions runs.", + "No action required — GitHub injects this automatically.", + "Injected automatically — no manual secret needed.", + true, + )); + m.insert("GITLEAKS_LICENSE", SecretEntry::new( + "GITLEAKS_LICENSE", + "Gitleaks commercial licence key (required for private repositories only).", + "Purchase at https://gitleaks.io/ Ā· Then add as a repository secret.", + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret.", + false, + )); + m.insert("CODECOV_TOKEN", SecretEntry::new( + "CODECOV_TOKEN", + "API token for uploading coverage reports to Codecov.", + "Sign in at https://app.codecov.io/ Ā· Navigate to your repo Ā· Copy the upload token.", + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret.", + false, + )); + m.insert("SLACK_BOT_TOKEN", SecretEntry::new( + "SLACK_BOT_TOKEN", + "Slack bot OAuth token for posting CI failure notifications.", + "Create a Slack app at https://api.slack.com/apps Ā· Add `chat:write` scope Ā· Install to workspace.", + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret.", + false, + )); + m.insert("SLACK_CHANNEL_ID", SecretEntry::new( + "SLACK_CHANNEL_ID", + "Slack channel ID where CI failure notifications are posted.", + "Right-click the channel in Slack → Copy link — the ID is the last segment (e.g. 
`C012AB3CD`).", + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret.", + false, + )); + + // Docker / container registry secrets + for name in &["DOCKER_USERNAME", "DOCKER_PASSWORD", "DOCKER_TOKEN"] { + m.insert(name, SecretEntry::new( + name, + "Docker Hub credentials for pushing container images.", + "Create an access token at https://hub.docker.com/settings/security Ā· Store username and token separately.", + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret.", + true, + )); + } + for name in &["ACR_LOGIN_SERVER", "ACR_USERNAME", "ACR_PASSWORD"] { + m.insert(name, SecretEntry::new( + name, + "Azure Container Registry credentials.", + "Azure Portal → Container registries → [your registry] → Access keys.", + "Azure DevOps → Pipelines → Library **or** GitHub repo → Settings → Secrets and variables → Actions.", + true, + )); + } + for name in &["GCP_SA_KEY", "GCP_PROJECT_ID"] { + m.insert(name, SecretEntry::new( + name, + "GCP service account key / project ID for pushing images to Artifact Registry.", + "GCP Console → IAM & Admin → Service Accounts → Create key (JSON).", + "GCP Secret Manager **or** GitHub repo → Settings → Secrets and variables → Actions.", + true, + )); + } + + m +} + +/// Converts a set of raw secret names into enriched `SecretEntry` values. +fn enrich_secrets(names: BTreeSet, platform: &CiPlatform) -> Vec { + let known = known_secrets(); + names + .into_iter() + .map(|name| { + known.get(name.as_str()).cloned().unwrap_or_else(|| SecretEntry::new( + &name, + "Project-specific secret — description not yet documented.", + "Add the value as a repository or pipeline secret.", + &generic_where_to_set(platform), + true, + )) + }) + .collect() +} + +/// Returns the platform-appropriate "where to set" guidance for an unknown secret. 
+fn generic_where_to_set(platform: &CiPlatform) -> String { + match platform { + CiPlatform::Hetzner => { + "GitHub repo → Settings → Secrets and variables → Actions → New repository secret." + .to_string() + } + CiPlatform::Azure => { + "Azure DevOps → Pipelines → Library → Variable groups → Add variable (mark as secret)." + .to_string() + } + CiPlatform::Gcp => { + "GCP Console → Secret Manager → Create secret **or** Cloud Build trigger → Substitution variables." + .to_string() + } + } +} + +// ── Markdown renderer ───────────────────────────────────────────────────────── + +fn render_markdown(entries: &[SecretEntry], platform: &CiPlatform) -> String { + if entries.is_empty() { + return "# Secrets Required\n\nNo secrets detected in the generated pipeline.\n".to_string(); + } + + let platform_label = platform_display_label(platform); + + let required: Vec<_> = entries.iter().filter(|e| e.required).collect(); + let optional: Vec<_> = entries.iter().filter(|e| !e.required).collect(); + + let mut out = String::new(); + out.push_str("# Secrets Required\n\n"); + out.push_str(&format!( + "Generated by `sync-ctl generate ci` for platform **{}**.\n\n", + platform_label + )); + out.push_str("---\n\n"); + + if !required.is_empty() { + out.push_str("## Required\n\n"); + out.push_str(table_header()); + for e in &required { + out.push_str(&table_row(e)); + } + out.push('\n'); + } + + if !optional.is_empty() { + out.push_str("## Optional\n\n"); + out.push_str(table_header()); + for e in &optional { + out.push_str(&table_row(e)); + } + out.push('\n'); + } + + out +} + +fn table_header() -> &'static str { + "| Secret Name | Description | How to obtain | Where to set |\n\ + |---|---|---|---|\n" +} + +fn table_row(e: &SecretEntry) -> String { + format!( + "| `{}` | {} | {} | {} |\n", + e.name, e.description, e.how_to_obtain, e.where_to_set + ) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // 
── collect_secret_names ─────────────────────────────────────────────── + + #[test] + fn test_github_actions_secrets_extracted() { + let yaml = r#" +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + OTHER: ${{ secrets.MY_TOKEN }} +"#; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert!(names.contains("GITHUB_TOKEN")); + assert!(names.contains("MY_TOKEN")); + } + + #[test] + fn test_github_actions_lowercase_secrets_ignored() { + // Secret names in the patterns must be uppercase — lowercase vars are not secrets + let yaml = "run: echo ${{ env.foo }}"; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert!(names.is_empty()); + } + + #[test] + fn test_azure_dollar_paren_secrets_extracted() { + let yaml = "value: $(ACR_PASSWORD)\nother: $(System.AccessToken)"; + let names = collect_secret_names(yaml, &CiFormat::AzurePipelines); + assert!(names.contains("ACR_PASSWORD")); + } + + #[test] + fn test_cloud_build_dollar_dollar_extracted() { + let yaml = "env:\n - GITHUB_TOKEN=$$_GITHUB_TOKEN"; + let names = collect_secret_names(yaml, &CiFormat::CloudBuild); + assert!(names.contains("_GITHUB_TOKEN")); + } + + #[test] + fn test_cloud_build_dollar_underscore_extracted() { + let yaml = "args: [\"$_GCP_PROJECT_ID\"]"; + let names = collect_secret_names(yaml, &CiFormat::CloudBuild); + assert!(names.contains("GCP_PROJECT_ID")); + } + + #[test] + fn test_deduplication() { + let yaml = r#" +env: + TOKEN: ${{ secrets.GITHUB_TOKEN }} + OTHER: ${{ secrets.GITHUB_TOKEN }} +"#; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert_eq!(names.len(), 1); + assert!(names.contains("GITHUB_TOKEN")); + } + + #[test] + fn test_empty_yaml_gives_empty_set() { + let names = collect_secret_names("steps: []", &CiFormat::GithubActions); + assert!(names.is_empty()); + } + + // ── enrich_secrets ───────────────────────────────────────────────────── + + #[test] + fn test_known_secret_enriched() { + let mut names = BTreeSet::new(); + 
names.insert("GITHUB_TOKEN".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Hetzner); + assert_eq!(entries.len(), 1); + assert_eq!(entries[0].name, "GITHUB_TOKEN"); + assert!(entries[0].required); + assert!(entries[0].description.contains("GitHub-issued")); + } + + #[test] + fn test_unknown_secret_gets_generic_entry() { + let mut names = BTreeSet::new(); + names.insert("MY_CUSTOM_API_KEY".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Hetzner); + assert_eq!(entries[0].name, "MY_CUSTOM_API_KEY"); + assert!(entries[0].description.contains("Project-specific")); + } + + #[test] + fn test_gitleaks_license_is_optional() { + let mut names = BTreeSet::new(); + names.insert("GITLEAKS_LICENSE".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Hetzner); + assert!(!entries[0].required); + } + + // ── generate_secrets_doc ─────────────────────────────────────────────── + + #[test] + fn test_doc_contains_required_heading() { + let yaml = "env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}"; + let doc = generate_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions); + assert!(doc.contains("## Required")); + assert!(doc.contains("GITHUB_TOKEN")); + } + + #[test] + fn test_doc_contains_optional_section_for_gitleaks() { + let yaml = r#" +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GL: ${{ secrets.GITLEAKS_LICENSE }} +"#; + let doc = generate_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions); + assert!(doc.contains("## Optional")); + assert!(doc.contains("GITLEAKS_LICENSE")); + } + + #[test] + fn test_doc_no_secrets_message() { + let doc = generate_secrets_doc("steps: []", CiPlatform::Gcp, CiFormat::CloudBuild); + assert!(doc.contains("No secrets detected")); + } + + #[test] + fn test_doc_platform_label_azure() { + let yaml = "value: $(ACR_PASSWORD)"; + let doc = generate_secrets_doc(yaml, CiPlatform::Azure, CiFormat::AzurePipelines); + assert!(doc.contains("Azure")); + } + + #[test] + fn 
test_doc_platform_label_gcp() { + let yaml = "env:\n - TOKEN=$$_GITHUB_TOKEN"; + let doc = generate_secrets_doc(yaml, CiPlatform::Gcp, CiFormat::CloudBuild); + assert!(doc.contains("GCP")); + } + + #[test] + fn test_doc_is_valid_markdown_table() { + let yaml = "env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}"; + let doc = generate_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions); + // Table has a header row and separator row + assert!(doc.contains("| Secret Name | Description | How to obtain |")); + assert!(doc.contains("|---|---|---|")); + } + + #[test] + fn test_doc_entries_sorted_alphabetically() { + let yaml = r#" +env: + B: ${{ secrets.BETA_TOKEN }} + A: ${{ secrets.ALPHA_TOKEN }} +"#; + let doc = generate_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions); + let alpha_pos = doc.find("ALPHA_TOKEN").unwrap(); + let beta_pos = doc.find("BETA_TOKEN").unwrap(); + assert!(alpha_pos < beta_pos, "entries should be sorted A-Z"); + } + + // ── where_to_set + write_secrets_doc ─────────────────────────────────── + + #[test] + fn test_where_to_set_field_populated_on_known_secret() { + let mut names = BTreeSet::new(); + names.insert("GITHUB_TOKEN".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Hetzner); + assert!(!entries[0].where_to_set.is_empty()); + assert!(entries[0].where_to_set.contains("Injected automatically")); + } + + #[test] + fn test_where_to_set_field_populated_on_unknown_secret() { + let mut names = BTreeSet::new(); + names.insert("MY_CUSTOM_KEY".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Hetzner); + assert!(entries[0].where_to_set.contains("Settings → Secrets")); + } + + #[test] + fn test_where_to_set_column_in_table() { + let yaml = "env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}"; + let doc = generate_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions); + assert!(doc.contains("| Secret Name | Description | How to obtain | Where to set |")); + 
assert!(doc.contains("|---|---|---|---|")); + } + + #[test] + fn test_write_secrets_doc_creates_file() { + let tmp = std::env::temp_dir().join(format!( + "syncable_secrets_test_{}", + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .subsec_nanos() + )); + std::fs::create_dir_all(&tmp).unwrap(); + let yaml = "env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}"; + write_secrets_doc(yaml, CiPlatform::Hetzner, CiFormat::GithubActions, &tmp).unwrap(); + let out = tmp.join(".syncable").join("SECRETS_REQUIRED.md"); + assert!(out.exists(), "SECRETS_REQUIRED.md should be created"); + let content = std::fs::read_to_string(&out).unwrap(); + assert!(content.contains("GITHUB_TOKEN")); + assert!(content.contains("Where to set")); + std::fs::remove_dir_all(&tmp).ok(); + } + + // ── serde_yaml scalar walker ─────────────────────────────────────────── + + #[test] + fn test_yaml_key_names_are_not_matched_as_secrets() { + // "GITHUB_TOKEN" appears as a YAML *key*, not a value — should not be extracted. + let yaml = "GITHUB_TOKEN: plain_string_not_a_secret_reference"; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert!(names.is_empty(), "key names must not be scanned"); + } + + #[test] + fn test_secrets_in_nested_mapping_values_are_found() { + let yaml = r#" +jobs: + ci: + env: + TOKEN: ${{ secrets.DEEP_SECRET }} +"#; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert!(names.contains("DEEP_SECRET")); + } + + #[test] + fn test_secrets_in_sequence_items_are_found() { + let yaml = "args:\n - \"$_MY_SECRET\"\n - \"other\""; + let names = collect_secret_names(yaml, &CiFormat::CloudBuild); + assert!(names.contains("MY_SECRET")); + } + + #[test] + fn test_invalid_yaml_falls_back_to_raw_scan() { + // Unquoted {{ }} makes this invalid YAML — scanner must fall back gracefully. 
+ let yaml = "run: deploy --token {{PLACEHOLDER}} --key ${{ secrets.FALLBACK_KEY }}"; + let names = collect_secret_names(yaml, &CiFormat::GithubActions); + assert!(names.contains("FALLBACK_KEY")); + } + + // ── platform-specific where_to_set ──────────────────────────────────── + + #[test] + fn test_unknown_secret_azure_gets_devops_instructions() { + let mut names = BTreeSet::new(); + names.insert("MY_AZURE_KEY".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Azure); + assert!(entries[0].where_to_set.contains("Azure DevOps"), + "Azure unknown secrets should reference Azure DevOps Library"); + } + + #[test] + fn test_unknown_secret_gcp_gets_secret_manager_instructions() { + let mut names = BTreeSet::new(); + names.insert("MY_GCP_KEY".to_string()); + let entries = enrich_secrets(names, &CiPlatform::Gcp); + assert!(entries[0].where_to_set.contains("Secret Manager"), + "GCP unknown secrets should reference Secret Manager"); + } + + // ── generate_secrets_doc_all_platforms ──────────────────────────────── + + #[test] + fn test_all_platforms_doc_has_per_platform_headings() { + let hetzner_yaml = "env:\n T: ${{ secrets.GITHUB_TOKEN }}"; + let azure_yaml = "value: $(ACR_PASSWORD)"; + let gcp_yaml = "args: [\"$_GCP_PROJECT_ID\"]"; + + let platforms = vec![ + (CiPlatform::Hetzner, CiFormat::GithubActions, hetzner_yaml), + (CiPlatform::Azure, CiFormat::AzurePipelines, azure_yaml), + (CiPlatform::Gcp, CiFormat::CloudBuild, gcp_yaml), + ]; + + let doc = generate_secrets_doc_all_platforms(&platforms); + assert!(doc.contains("## Hetzner / GitHub Actions")); + assert!(doc.contains("## Azure Pipelines")); + assert!(doc.contains("## GCP Cloud Build")); + } + + #[test] + fn test_all_platforms_doc_empty_platform_shows_note() { + let platforms = vec![ + (CiPlatform::Hetzner, CiFormat::GithubActions, "steps: []"), + ]; + let doc = generate_secrets_doc_all_platforms(&platforms); + assert!(doc.contains("No secrets detected")); + } + + #[test] + fn 
test_all_platforms_doc_empty_input() { + let doc = generate_secrets_doc_all_platforms(&[]); + assert!(doc.contains("No pipeline platforms supplied")); + } + + #[test] + fn test_all_platforms_doc_required_optional_split() { + let yaml = "env:\n A: ${{ secrets.GITHUB_TOKEN }}\n B: ${{ secrets.GITLEAKS_LICENSE }}"; + let platforms = vec![(CiPlatform::Hetzner, CiFormat::GithubActions, yaml)]; + let doc = generate_secrets_doc_all_platforms(&platforms); + assert!(doc.contains("### Required")); + assert!(doc.contains("### Optional")); + assert!(doc.contains("GITHUB_TOKEN")); + assert!(doc.contains("GITLEAKS_LICENSE")); + } +} diff --git a/src/generator/ci_generation/templates/azure_pipelines.rs b/src/generator/ci_generation/templates/azure_pipelines.rs new file mode 100644 index 00000000..2754a2c9 --- /dev/null +++ b/src/generator/ci_generation/templates/azure_pipelines.rs @@ -0,0 +1,613 @@ +//! Azure Pipelines CI Template Builder — CI-12 +//! +//! Generates `azure-pipelines.yml` from `CiPipeline` by mapping each step +//! to the Azure Pipelines task vocabulary: +//! +//! - Runtime setup → `NodeTool@0` / `UsePythonVersion@0` / `GoTool@0` / script +//! - Cache → `Cache@2` +//! - Shell steps → `script:` with `displayName:` +//! - Artifact upload→ `PublishBuildArtifacts@1` +//! - Trivy / Gitleaks → inline `script:` (no native Azure task) +//! +//! Azure auto-checks-out the repo before any steps, so no explicit step +//! is emitted for that. 
+ +use std::collections::BTreeMap; + +use serde::Serialize; + +use crate::generator::ci_generation::schema::CiPipeline; + +// ── YAML document structs ───────────────────────────────────────────────────── + +#[derive(Serialize)] +struct AzurePipeline { + trigger: AzureTrigger, + pr: AzurePr, + #[serde(skip_serializing_if = "Option::is_none")] + schedules: Option>, + pool: Pool, + steps: Vec, +} + +#[derive(Serialize)] +struct AzureTrigger { + branches: BranchFilter, + #[serde(skip_serializing_if = "Option::is_none")] + tags: Option, +} + +#[derive(Serialize)] +struct AzurePr { + branches: BranchFilter, +} + +#[derive(Serialize)] +struct BranchFilter { + include: Vec, +} + +#[derive(Serialize)] +struct TagFilter { + include: Vec, +} + +#[derive(Serialize)] +struct AzureSchedule { + cron: String, + #[serde(rename = "displayName")] + display_name: String, + branches: BranchFilter, + always: bool, +} + +#[derive(Serialize)] +struct Pool { + #[serde(rename = "vmImage")] + vm_image: String, +} + +/// A single pipeline step. Either `task:` or `script:` will be set, never both. +/// All fields default to `None` so optional keys are omitted from the YAML output. +#[derive(Serialize, Default)] +struct AzureStep { + #[serde(skip_serializing_if = "Option::is_none")] + task: Option, + #[serde(skip_serializing_if = "Option::is_none")] + script: Option, + #[serde(rename = "displayName", skip_serializing_if = "Option::is_none")] + display_name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + inputs: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + env: Option>, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Renders a `CiPipeline` into an Azure Pipelines YAML string. +/// +/// The returned string is suitable for writing to `azure-pipelines.yml` +/// at the repository root. 
+pub fn render(pipeline: &CiPipeline) -> String { + let doc = build_pipeline(pipeline); + serde_yaml::to_string(&doc) + .expect("AzurePipeline serialisation is infallible for valid CiPipeline") +} + +// ── Builder ─────────────────────────────────────────────────────────────────── + +fn build_pipeline(pipeline: &CiPipeline) -> AzurePipeline { + let triggers = &pipeline.triggers; + AzurePipeline { + trigger: AzureTrigger { + branches: BranchFilter { include: triggers.push_branches.clone() }, + tags: triggers.tag_pattern.as_ref().map(|p| TagFilter { include: vec![p.clone()] }), + }, + pr: AzurePr { + branches: BranchFilter { include: triggers.pr_branches.clone() }, + }, + schedules: triggers.scheduled.as_ref().map(|cron| { + vec![AzureSchedule { + cron: cron.clone(), + display_name: "Scheduled build".to_string(), + branches: BranchFilter { include: triggers.push_branches.clone() }, + always: true, + }] + }), + pool: Pool { vm_image: "ubuntu-latest".to_string() }, + steps: build_steps(pipeline), + } +} + +fn build_steps(pipeline: &CiPipeline) -> Vec { + let mut steps: Vec = Vec::new(); + + // 1. Runtime setup + match azure_runtime_task(&pipeline.runtime.action) { + Some((task_name, input_key)) => { + let mut inputs = BTreeMap::new(); + inputs.insert(input_key.to_string(), pipeline.runtime.version.clone()); + steps.push(AzureStep { + task: Some(task_name.to_string()), + display_name: Some("Set up runtime".to_string()), + inputs: Some(inputs), + ..Default::default() + }); + } + None => { + // Rust and unknown runtimes — rustup handles toolchain install + steps.push(AzureStep { + script: Some(format!("rustup default {}", pipeline.runtime.version)), + display_name: Some("Set up runtime".to_string()), + ..Default::default() + }); + } + } + + // 2. 
Cache (optional) + if let Some(cache) = &pipeline.cache { + let mut inputs = BTreeMap::new(); + inputs.insert("key".to_string(), gh_cache_key_to_azure(&cache.key)); + inputs.insert("path".to_string(), cache.paths.join("\n")); + if !cache.restore_keys.is_empty() { + let azure_restore_keys: Vec = + cache.restore_keys.iter().map(|k| gh_cache_key_to_azure(k)).collect(); + inputs.insert("restoreKeys".to_string(), azure_restore_keys.join("\n")); + } + steps.push(AzureStep { + task: Some("Cache@2".to_string()), + display_name: Some("Cache dependencies".to_string()), + inputs: Some(inputs), + ..Default::default() + }); + } + + // 3. Install + steps.push(AzureStep { + script: Some(pipeline.install.command.clone()), + display_name: Some("Install dependencies".to_string()), + ..Default::default() + }); + + // 4. Lint (optional) + if let Some(lint) = &pipeline.lint { + steps.push(AzureStep { + script: Some(lint.command.clone()), + display_name: Some("Lint".to_string()), + ..Default::default() + }); + } + + // 5. Test + let test_cmd = match &pipeline.test.coverage_flag { + Some(flag) => format!("{} {}", pipeline.test.command, flag), + None => pipeline.test.command.clone(), + }; + steps.push(AzureStep { + script: Some(test_cmd), + display_name: Some("Test".to_string()), + ..Default::default() + }); + + // 6. Build (optional) + if let Some(build) = &pipeline.build { + steps.push(AzureStep { + script: Some(build.command.clone()), + display_name: Some("Build".to_string()), + ..Default::default() + }); + } + + // 7. 
Docker (optional) — no QEMU/Buildx tasks in Azure; plain script steps + if let Some(docker) = &pipeline.docker_build { + steps.push(AzureStep { + script: Some(format!("docker build -t {} .", docker.image_tag)), + display_name: Some("Build Docker image".to_string()), + ..Default::default() + }); + if docker.push { + steps.push(AzureStep { + script: Some(format!("docker push {}", docker.image_tag)), + display_name: Some("Push Docker image".to_string()), + ..Default::default() + }); + } + } + + // 8. Image scan (optional) — Trivy installed inline + if let Some(scan) = &pipeline.image_scan { + let trivy_script = format!( + "curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin\n\ + trivy image --exit-code 1 --severity {} --format {} --output {} {}", + scan.fail_on_severity, scan.format, scan.output, scan.image_ref, + ); + steps.push(AzureStep { + script: Some(trivy_script), + display_name: Some("Scan image (Trivy)".to_string()), + ..Default::default() + }); + } + + // 9. Secret scan (always) — Gitleaks installed inline + let gitleaks_script = + "curl -sSfL https://github.com/gitleaks/gitleaks/releases/latest/download/\ + gitleaks_linux_x64.tar.gz | tar xz -C /usr/local/bin\n\ + gitleaks detect --source . --exit-code 1" + .to_string(); + let mut sec_env = BTreeMap::new(); + // Azure Pipelines variables are accessed via $(VAR_NAME), not ${{ secrets.VAR }} + sec_env.insert( + "GITHUB_TOKEN".to_string(), + "$(GITHUB_TOKEN)".to_string(), + ); + if let Some(license) = &pipeline.secret_scan.gitleaks_license_secret { + sec_env.insert( + "GITLEAKS_LICENSE".to_string(), + format!("$({})", license), + ); + } + steps.push(AzureStep { + script: Some(gitleaks_script), + display_name: Some("Secret scan (Gitleaks)".to_string()), + env: Some(sec_env), + ..Default::default() + }); + + // 10. 
Artifact upload (optional) + if let Some(artifact) = &pipeline.upload_artifact { + let mut inputs = BTreeMap::new(); + inputs.insert("pathToPublish".to_string(), artifact.path.clone()); + inputs.insert("artifactName".to_string(), artifact.name.clone()); + steps.push(AzureStep { + task: Some("PublishBuildArtifacts@1".to_string()), + display_name: Some("Upload artifact".to_string()), + inputs: Some(inputs), + ..Default::default() + }); + } + + steps +} + +/// Translates a GitHub Actions cache key expression to the Azure Pipelines +/// `Cache@2` key format. +/// +/// Conversions applied: +/// `${{ runner.os }}` → `$(Agent.OS)` +/// `${{ hashFiles('GLOB') }}` → `GLOB` (Azure hashes file content natively) +/// `pm-$(Agent.OS)-glob` → `pm | $(Agent.OS) | glob` +/// +/// `split_once` is used for the separator conversion so that hyphens **inside** +/// file names (e.g. `package-lock.json`) are never corrupted. +fn gh_cache_key_to_azure(key: &str) -> String { + let key = key.replace("${{ runner.os }}", "$(Agent.OS)"); + let key = strip_hash_files_wrapper(&key); + // Rebuild as pipe-separated Azure key. The OS variable is the fixed + // boundary; everything before it is the PM prefix, everything after is + // the lock-file glob. + if let Some((prefix, rest)) = key.split_once("-$(Agent.OS)-") { + let trimmed = rest.trim_end_matches('-'); + let combined = format!("{prefix} | $(Agent.OS) | {trimmed}"); + return combined.trim_end_matches(|c: char| c == ' ' || c == '|').to_string(); + } + // Restore key: `pm-$(Agent.OS)` with no trailing glob. + if let Some((prefix, _)) = key.split_once("-$(Agent.OS)") { + return format!("{prefix} | $(Agent.OS)"); + } + key +} + +/// Removes `${{ hashFiles('GLOB') }}` wrappers, leaving only the glob(s). +/// Inner single-quotes from multi-argument calls are stripped so the result +/// is a clean comma-separated list compatible with Azure `Cache@2`. 
/// Removes `${{ hashFiles('GLOB') }}` wrappers, leaving only the glob(s).
/// Inner single-quotes from multi-argument calls are stripped so the result
/// is a clean comma-separated list compatible with Azure `Cache@2`.
fn strip_hash_files_wrapper(s: &str) -> String {
    const OPEN: &str = "${{ hashFiles('";
    const CLOSE: &str = "') }}";

    let mut out = s.to_string();
    // Repeatedly unwrap the first remaining wrapper until none are left.
    while let Some(open_at) = out.find(OPEN) {
        let inner_start = open_at + OPEN.len();
        let close_rel = match out[inner_start..].find(CLOSE) {
            Some(rel) => rel,
            // Unterminated wrapper: leave the remainder untouched.
            None => break,
        };
        let inner_end = inner_start + close_rel;
        // Drop inner quotes produced by multi-argument hashFiles calls.
        let globs = out[inner_start..inner_end].replace('\'', "");
        out.replace_range(open_at..inner_end + CLOSE.len(), &globs);
    }
    out
}

/// Maps a GitHub Actions runtime action identifier to the equivalent Azure
/// Pipelines task name and its version-input key. Returns `None` for runtimes
/// that have no native Azure task (e.g. Rust / rust-toolchain).
fn azure_runtime_task(action: &str) -> Option<(&'static str, &'static str)> {
    const TASKS: [(&str, (&'static str, &'static str)); 4] = [
        ("setup-node", ("NodeTool@0", "versionSpec")),
        ("setup-python", ("UsePythonVersion@0", "versionSpec")),
        ("setup-go", ("GoTool@0", "version")),
        ("setup-java", ("JavaToolInstaller@0", "versionSpec")),
    ];
    // Checked in declaration order, matching the original if/else chain.
    for (needle, task) in TASKS {
        if action.contains(needle) {
            return Some(task);
        }
    }
    None
}
runtime: RuntimeStep { + action: "actions/setup-node@v4".to_string(), + version: "20".to_string(), + }, + cache: None, + install: InstallStep { command: "npm ci".to_string() }, + lint: None, + test: TestStep { + command: "npx jest".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + build: None, + docker_build: None, + image_scan: None, + secret_scan: SecretScanStep { + github_token_expr: "$(System.AccessToken)".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } + } + + #[test] + fn test_render_produces_valid_yaml() { + let output = render(&make_pipeline()); + let parsed: Result = serde_yaml::from_str(&output); + assert!(parsed.is_ok(), "render output must be valid YAML:\n{output}"); + } + + #[test] + fn test_trigger_branches_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("trigger")); + assert!(output.contains("main")); + } + + #[test] + fn test_pool_vm_image_ubuntu() { + let output = render(&make_pipeline()); + assert!(output.contains("ubuntu-latest")); + } + + #[test] + fn test_node_tool_task_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("NodeTool@0")); + assert!(output.contains("versionSpec")); + } + + #[test] + fn test_rust_toolchain_uses_script_step() { + let mut p = make_pipeline(); + p.runtime = RuntimeStep { + action: "dtolnay/rust-toolchain@stable".to_string(), + version: "stable".to_string(), + }; + let output = render(&p); + assert!(!output.contains("NodeTool")); + assert!(output.contains("rustup default stable")); + } + + #[test] + fn test_install_command_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("npm ci")); + } + + #[test] + fn test_lint_omitted_when_none() { + let output = render(&make_pipeline()); + // no displayName: Lint entry + assert!(!output.contains("displayName: Lint")); + } + + #[test] + fn test_lint_present_when_some() { + let mut p = make_pipeline(); + p.lint = 
Some(LintStep { command: "cargo clippy -- -D warnings".to_string() }); + let output = render(&p); + assert!(output.contains("cargo clippy -- -D warnings")); + } + + #[test] + fn test_test_command_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("npx jest")); + } + + #[test] + fn test_coverage_flag_appended() { + let mut p = make_pipeline(); + p.test.coverage_flag = Some("--coverage".to_string()); + let output = render(&p); + assert!(output.contains("npx jest --coverage")); + } + + #[test] + fn test_build_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("displayName: Build")); + } + + #[test] + fn test_build_emitted_when_some() { + let mut p = make_pipeline(); + p.build = Some(BuildStep { command: "cargo build --release".to_string(), artifact_path: None }); + let output = render(&p); + assert!(output.contains("cargo build --release")); + } + + #[test] + fn test_docker_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("docker build")); + } + + #[test] + fn test_docker_script_emitted() { + let mut p = make_pipeline(); + p.docker_build = Some(DockerBuildStep { + image_tag: "myrepo/app:latest".to_string(), + push: true, + qemu: false, + buildx: false, + }); + let output = render(&p); + assert!(output.contains("docker build -t myrepo/app:latest .")); + assert!(output.contains("docker push myrepo/app:latest")); + } + + #[test] + fn test_image_scan_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("trivy")); + } + + #[test] + fn test_image_scan_script_emitted() { + let mut p = make_pipeline(); + p.image_scan = Some(ImageScanStep { + image_ref: "myrepo/app:latest".to_string(), + fail_on_severity: "CRITICAL,HIGH".to_string(), + format: "table".to_string(), + output: "trivy.txt".to_string(), + upload_sarif: false, + }); + let output = render(&p); + assert!(output.contains("trivy image")); + 
assert!(output.contains("myrepo/app:latest")); + } + + #[test] + fn test_secret_scan_always_present() { + let output = render(&make_pipeline()); + assert!(output.contains("gitleaks detect")); + assert!(output.contains("GITHUB_TOKEN")); + // Must use Azure variable syntax, not GitHub Actions expression + assert!(output.contains("$(GITHUB_TOKEN)")); + assert!(!output.contains("secrets.GITHUB_TOKEN")); + } + + #[test] + fn test_artifact_task_emitted() { + let mut p = make_pipeline(); + p.upload_artifact = Some(ArtifactStep { + name: "dist".to_string(), + path: "dist/**".to_string(), + }); + let output = render(&p); + assert!(output.contains("PublishBuildArtifacts@1")); + assert!(output.contains("dist/**")); + } + + #[test] + fn test_cache_task_emitted() { + let mut p = make_pipeline(); + p.cache = Some(CacheStep { + paths: vec!["~/.npm".to_string()], + key: "npm-$(Agent.OS)-$(Build.SourceVersion)".to_string(), + restore_keys: vec!["npm-$(Agent.OS)-".to_string()], + }); + let output = render(&p); + assert!(output.contains("Cache@2")); + assert!(output.contains("~/.npm")); + } + + #[test] + fn test_scheduled_trigger_emitted() { + let mut p = make_pipeline(); + p.triggers.scheduled = Some("0 3 * * 1".to_string()); + let output = render(&p); + assert!(output.contains("schedules")); + assert!(output.contains("0 3 * * 1")); + } + + #[test] + fn test_tag_pattern_in_trigger() { + let mut p = make_pipeline(); + p.triggers.tag_pattern = Some("v*".to_string()); + let output = render(&p); + assert!(output.contains("tags")); + assert!(output.contains("v*")); + } + + #[test] + fn test_gitleaks_license_env_when_some() { + let mut p = make_pipeline(); + p.secret_scan.gitleaks_license_secret = Some("GITLEAKS_LICENSE".to_string()); + let output = render(&p); + assert!(output.contains("GITLEAKS_LICENSE")); + } + + #[test] + fn test_cache_key_translated_to_azure_syntax() { + let mut p = make_pipeline(); + p.cache = Some(CacheStep { + paths: vec!["~/.cargo/registry".to_string()], + key: 
"cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}".to_string(), + restore_keys: vec!["cargo-${{ runner.os }}-".to_string()], + }); + let output = render(&p); + // GitHub Actions expressions must be absent + assert!(!output.contains("runner.os"), "runner.os should be translated"); + assert!(!output.contains("hashFiles"), "hashFiles() should be stripped"); + // Azure syntax must be present + assert!(output.contains("Agent.OS")); + assert!(output.contains("Cargo.lock")); + } + + #[test] + fn test_gh_cache_key_to_azure_cargo() { + let input = "cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}"; + let result = gh_cache_key_to_azure(input); + assert_eq!(result, "cargo | $(Agent.OS) | **/Cargo.lock"); + } + + #[test] + fn test_gh_cache_key_to_azure_restore_key() { + let input = "cargo-${{ runner.os }}-"; + let result = gh_cache_key_to_azure(input); + assert_eq!(result, "cargo | $(Agent.OS)"); + } + + #[test] + fn test_gh_cache_key_to_azure_npm() { + let input = "npm-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }}"; + let result = gh_cache_key_to_azure(input); + assert_eq!(result, "npm | $(Agent.OS) | **/package-lock.json"); + } +} diff --git a/src/generator/ci_generation/templates/cloud_build.rs b/src/generator/ci_generation/templates/cloud_build.rs new file mode 100644 index 00000000..0e427a2a --- /dev/null +++ b/src/generator/ci_generation/templates/cloud_build.rs @@ -0,0 +1,468 @@ +//! GCP Cloud Build CI Template Builder — CI-13 +//! +//! Generates `cloudbuild.yaml`. Each CI step maps to a Cloud Build step +//! keyed by a Docker `name:` (container image), an `entrypoint:`, and `args:`. +//! +//! Key design constraints vs. GitHub Actions / Azure Pipelines: +//! - No "runtime setup" step: the container image IS the runtime. +//! - No trigger block: GCP triggers are configured in the console/API. +//! - Artifact upload maps to top-level `artifacts.objects` (GCS path). +//! - Trivy → `aquasec/trivy` image; Gitleaks → `zricethezav/gitleaks` image. 
+//! - Cache: no native dep cache; skipped (GCS volume mounts require bucket info). + +use serde::Serialize; + +use crate::generator::ci_generation::schema::CiPipeline; + +// ── YAML document structs ───────────────────────────────────────────────────── + +#[derive(Serialize)] +struct CloudBuildConfig { + steps: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + artifacts: Option, + timeout: String, +} + +/// A single Cloud Build step. `name` is always a Docker image URI. +#[derive(Serialize, Default)] +struct CloudBuildStep { + name: String, + #[serde(skip_serializing_if = "Option::is_none")] + id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + entrypoint: Option, + #[serde(skip_serializing_if = "Option::is_none")] + args: Option>, + /// Cloud Build env entries are `"KEY=VALUE"` strings. + #[serde(skip_serializing_if = "Option::is_none")] + env: Option>, +} + +#[derive(Serialize)] +struct Artifacts { + objects: ArtifactObjects, +} + +#[derive(Serialize)] +struct ArtifactObjects { + location: String, + paths: Vec, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Renders a `CiPipeline` into a GCP Cloud Build YAML string. +/// +/// The returned string is ready to write as `cloudbuild.yaml` at the +/// repository root. Triggers must be configured separately in the GCP console. 
+pub fn render(pipeline: &CiPipeline) -> String { + let doc = build_config(pipeline); + serde_yaml::to_string(&doc) + .expect("CloudBuildConfig serialisation is infallible for valid CiPipeline") +} + +// ── Builder ─────────────────────────────────────────────────────────────────── + +fn build_config(pipeline: &CiPipeline) -> CloudBuildConfig { + CloudBuildConfig { + steps: build_steps(pipeline), + artifacts: pipeline.upload_artifact.as_ref().map(|art| Artifacts { + objects: ArtifactObjects { + location: format!("gs://{{{{GCS_ARTIFACTS_BUCKET}}}}/{}", art.name), + paths: vec![art.path.clone()], + }, + }), + timeout: "3600s".to_string(), + } +} + +fn build_steps(pipeline: &CiPipeline) -> Vec { + let runtime_image = runtime_docker_image(&pipeline.runtime.action, &pipeline.runtime.version); + let mut steps: Vec = Vec::new(); + + // NOTE: Cloud Build auto-clones the source repo — no checkout step needed. + + // 1. Install + steps.push(shell_step( + &runtime_image, + Some("Install dependencies"), + &pipeline.install.command, + None, + )); + + // 2. Lint (optional) + if let Some(lint) = &pipeline.lint { + steps.push(shell_step(&runtime_image, Some("Lint"), &lint.command, None)); + } + + // 3. Test + let test_cmd = match &pipeline.test.coverage_flag { + Some(flag) => format!("{} {}", pipeline.test.command, flag), + None => pipeline.test.command.clone(), + }; + steps.push(shell_step(&runtime_image, Some("Test"), &test_cmd, None)); + + // 4. Build (optional) + if let Some(build) = &pipeline.build { + steps.push(shell_step(&runtime_image, Some("Build"), &build.command, None)); + } + + // 5. 
Docker (optional) — gcr.io/cloud-builders/docker is the canonical builder image + if let Some(docker) = &pipeline.docker_build { + steps.push(CloudBuildStep { + name: "gcr.io/cloud-builders/docker".to_string(), + id: Some("Build Docker image".to_string()), + args: Some(vec![ + "build".to_string(), + "-t".to_string(), + docker.image_tag.clone(), + ".".to_string(), + ]), + ..Default::default() + }); + if docker.push { + steps.push(CloudBuildStep { + name: "gcr.io/cloud-builders/docker".to_string(), + id: Some("Push Docker image".to_string()), + args: Some(vec!["push".to_string(), docker.image_tag.clone()]), + ..Default::default() + }); + } + } + + // 6. Image scan (optional) — aquasec/trivy image + if let Some(scan) = &pipeline.image_scan { + steps.push(CloudBuildStep { + name: "aquasec/trivy".to_string(), + id: Some("Scan image (Trivy)".to_string()), + args: Some(vec![ + "image".to_string(), + "--exit-code".to_string(), + "1".to_string(), + "--severity".to_string(), + scan.fail_on_severity.clone(), + "--format".to_string(), + scan.format.clone(), + "--output".to_string(), + scan.output.clone(), + scan.image_ref.clone(), + ]), + ..Default::default() + }); + } + + // 7. Secret scan (always) — zricethezav/gitleaks image + // GCP Cloud Build uses $_VARIABLE_NAME for user-defined substitution variables. + let mut sec_env = vec!["GITHUB_TOKEN=$_GITHUB_TOKEN".to_string()]; + if let Some(license) = &pipeline.secret_scan.gitleaks_license_secret { + sec_env.push(format!("GITLEAKS_LICENSE=${{{}}}", license)); + } + steps.push(CloudBuildStep { + name: "zricethezav/gitleaks".to_string(), + id: Some("Secret scan (Gitleaks)".to_string()), + args: Some(vec![ + "detect".to_string(), + "--source".to_string(), + "/workspace".to_string(), + "--exit-code".to_string(), + "1".to_string(), + ]), + env: Some(sec_env), + ..Default::default() + }); + + steps +} + +/// Constructs a step that runs a shell command via `bash -c` inside the +/// given container image. 
Suitable for any arbitrary `run:` equivalent. +fn shell_step( + image: &str, + id: Option<&str>, + command: &str, + env: Option>, +) -> CloudBuildStep { + CloudBuildStep { + name: image.to_string(), + id: id.map(|s| s.to_string()), + entrypoint: Some("bash".to_string()), + args: Some(vec!["-c".to_string(), command.to_string()]), + env, + ..Default::default() + } +} + +/// Maps a GitHub Actions runtime action to the equivalent Docker Hub image URI +/// used as the Cloud Build step `name:`. +fn runtime_docker_image(action: &str, version: &str) -> String { + if action.contains("setup-node") { + format!("node:{version}") + } else if action.contains("setup-python") { + format!("python:{version}") + } else if action.contains("setup-go") { + format!("golang:{version}") + } else if action.contains("setup-java") { + format!("eclipse-temurin:{version}") + } else if action.contains("rust-toolchain") { + format!("rust:{version}") + } else { + // Unknown runtime: fall back to a generic Debian image with bash + "debian:bookworm-slim".to_string() + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{CiFormat, CiPlatform}; + use crate::generator::ci_generation::schema::{ + ArtifactStep, BuildStep, CacheStep, CiPipeline, DockerBuildStep, ImageScanStep, + InstallStep, LintStep, RuntimeStep, SecretScanStep, TestStep, TriggerConfig, + }; + + fn make_pipeline() -> CiPipeline { + CiPipeline { + project_name: "my-app".to_string(), + platform: CiPlatform::Gcp, + format: CiFormat::CloudBuild, + triggers: TriggerConfig { + push_branches: vec!["main".to_string()], + pr_branches: vec!["main".to_string()], + tag_pattern: None, + scheduled: None, + }, + runtime: RuntimeStep { + action: "actions/setup-node@v4".to_string(), + version: "20".to_string(), + }, + cache: None, + install: InstallStep { command: "npm ci".to_string() }, + lint: None, + test: TestStep { + command: "npx jest".to_string(), 
+ coverage_flag: None, + coverage_report_path: None, + }, + build: None, + docker_build: None, + image_scan: None, + secret_scan: SecretScanStep { + github_token_expr: "$_GITHUB_TOKEN".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } + } + + #[test] + fn test_render_produces_valid_yaml() { + let output = render(&make_pipeline()); + let parsed: Result = serde_yaml::from_str(&output); + assert!(parsed.is_ok(), "render output must be valid YAML:\n{output}"); + } + + #[test] + fn test_no_trigger_block_emitted() { + // Cloud Build triggers live in the GCP console, not in the YAML. + let output = render(&make_pipeline()); + assert!(!output.contains("trigger:")); + assert!(!output.contains("on:")); + } + + #[test] + fn test_timeout_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("3600s")); + } + + #[test] + fn test_node_runtime_image_used() { + let output = render(&make_pipeline()); + assert!(output.contains("node:20")); + } + + #[test] + fn test_python_runtime_image() { + let mut p = make_pipeline(); + p.runtime = RuntimeStep { + action: "actions/setup-python@v4".to_string(), + version: "3.11".to_string(), + }; + let output = render(&p); + assert!(output.contains("python:3.11")); + } + + #[test] + fn test_rust_runtime_image() { + let mut p = make_pipeline(); + p.runtime = RuntimeStep { + action: "dtolnay/rust-toolchain@stable".to_string(), + version: "stable".to_string(), + }; + let output = render(&p); + assert!(output.contains("rust:stable")); + } + + #[test] + fn test_install_step_uses_bash_entrypoint() { + let output = render(&make_pipeline()); + assert!(output.contains("bash")); + assert!(output.contains("npm ci")); + } + + #[test] + fn test_lint_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("Lint")); + } + + #[test] + fn test_lint_step_emitted() { + let mut p = make_pipeline(); + p.lint = Some(LintStep { command: "cargo clippy -- -D 
warnings".to_string() }); + let output = render(&p); + assert!(output.contains("cargo clippy -- -D warnings")); + assert!(output.contains("Lint")); + } + + #[test] + fn test_test_command_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("npx jest")); + } + + #[test] + fn test_coverage_flag_appended() { + let mut p = make_pipeline(); + p.test.coverage_flag = Some("--coverage".to_string()); + let output = render(&p); + assert!(output.contains("npx jest --coverage")); + } + + #[test] + fn test_build_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("id: Build")); + } + + #[test] + fn test_build_step_emitted() { + let mut p = make_pipeline(); + p.build = Some(BuildStep { + command: "cargo build --release".to_string(), + artifact_path: None, + }); + let output = render(&p); + assert!(output.contains("cargo build --release")); + } + + #[test] + fn test_docker_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("gcr.io/cloud-builders/docker")); + } + + #[test] + fn test_docker_build_step_emitted() { + let mut p = make_pipeline(); + p.docker_build = Some(DockerBuildStep { + image_tag: "gcr.io/my-project/app:latest".to_string(), + push: false, + qemu: false, + buildx: false, + }); + let output = render(&p); + assert!(output.contains("gcr.io/cloud-builders/docker")); + assert!(output.contains("gcr.io/my-project/app:latest")); + } + + #[test] + fn test_docker_push_step_emitted() { + let mut p = make_pipeline(); + p.docker_build = Some(DockerBuildStep { + image_tag: "gcr.io/my-project/app:latest".to_string(), + push: true, + qemu: false, + buildx: false, + }); + let output = render(&p); + assert!(output.contains("Push Docker image")); + assert!(output.contains("push")); + } + + #[test] + fn test_image_scan_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("aquasec/trivy")); + } + + #[test] + fn test_trivy_step_emitted() { + let mut 
p = make_pipeline(); + p.image_scan = Some(ImageScanStep { + image_ref: "gcr.io/my-project/app:latest".to_string(), + fail_on_severity: "CRITICAL,HIGH".to_string(), + format: "table".to_string(), + output: "trivy.txt".to_string(), + upload_sarif: false, + }); + let output = render(&p); + assert!(output.contains("aquasec/trivy")); + assert!(output.contains("CRITICAL,HIGH")); + } + + #[test] + fn test_secret_scan_always_present() { + let output = render(&make_pipeline()); + assert!(output.contains("zricethezav/gitleaks")); + assert!(output.contains("GITHUB_TOKEN")); + // Must use GCP substitution variable syntax, not GitHub Actions expression + assert!(output.contains("$_GITHUB_TOKEN")); + assert!(!output.contains("secrets.GITHUB_TOKEN")); + } + + #[test] + fn test_gitleaks_license_env_when_some() { + let mut p = make_pipeline(); + p.secret_scan.gitleaks_license_secret = Some("GITLEAKS_LICENSE".to_string()); + let output = render(&p); + assert!(output.contains("GITLEAKS_LICENSE")); + } + + #[test] + fn test_artifact_objects_emitted() { + let mut p = make_pipeline(); + p.upload_artifact = Some(ArtifactStep { + name: "build-output".to_string(), + path: "dist/**".to_string(), + }); + let output = render(&p); + assert!(output.contains("artifacts")); + assert!(output.contains("GCS_ARTIFACTS_BUCKET")); + assert!(output.contains("dist/**")); + } + + #[test] + fn test_no_artifacts_section_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("artifacts:")); + } + + #[test] + fn test_cache_step_not_emitted() { + // Cloud Build has no native dep cache — CacheStep is deliberately skipped. 
+ let mut p = make_pipeline(); + p.cache = Some(CacheStep { + paths: vec!["~/.npm".to_string()], + key: "npm-key".to_string(), + restore_keys: vec![], + }); + let output = render(&p); + assert!(!output.contains("Cache@2")); + assert!(!output.contains("actions/cache")); + } +} diff --git a/src/generator/ci_generation/templates/github_actions.rs b/src/generator/ci_generation/templates/github_actions.rs new file mode 100644 index 00000000..446f5e85 --- /dev/null +++ b/src/generator/ci_generation/templates/github_actions.rs @@ -0,0 +1,507 @@ +//! GitHub Actions CI Template Builder — CI-11 +//! +//! Assembles all generated steps into a valid `.github/workflows/ci.yml` +//! by mapping every field of `CiPipeline` onto a typed `GithubWorkflow` struct +//! and serialising it with `serde_yaml`. No string concatenation — the +//! compiler enforces structural validity. + +use std::collections::BTreeMap; + +use serde::Serialize; + +use crate::generator::ci_generation::schema::CiPipeline; + +// ── YAML document structs ───────────────────────────────────────────────────── + +#[derive(Serialize)] +struct GithubWorkflow { + name: String, + /// `on` is a reserved word in Rust; serde renames it in the output. + #[serde(rename = "on")] + on: WorkflowOn, + jobs: Jobs, +} + +#[derive(Serialize)] +struct WorkflowOn { + push: PushTrigger, + pull_request: PrTrigger, + #[serde(skip_serializing_if = "Option::is_none")] + schedule: Option>, +} + +#[derive(Serialize)] +struct PushTrigger { + branches: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + tags: Option>, +} + +#[derive(Serialize)] +struct PrTrigger { + branches: Vec, +} + +#[derive(Serialize)] +struct CronEntry { + cron: String, +} + +#[derive(Serialize)] +struct Jobs { + ci: Job, +} + +#[derive(Serialize)] +struct Job { + #[serde(rename = "runs-on")] + runs_on: String, + steps: Vec, +} + +/// A single workflow step. 
All fields are optional so the same struct covers +/// both `uses:` steps and `run:` steps — absent fields are omitted from the +/// YAML output via `skip_serializing_if`. +#[derive(Serialize, Default)] +struct Step { + #[serde(skip_serializing_if = "Option::is_none")] + name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + uses: Option, + #[serde(skip_serializing_if = "Option::is_none")] + run: Option, + #[serde(skip_serializing_if = "Option::is_none")] + with: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + env: Option>, +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Renders a `CiPipeline` into a GitHub Actions workflow YAML string. +/// +/// The returned string is suitable for writing directly to +/// `.github/workflows/ci.yml` or printing for `--dry-run`. +pub fn render(pipeline: &CiPipeline) -> String { + let workflow = build_workflow(pipeline); + serde_yaml::to_string(&workflow) + .expect("GithubWorkflow serialisation is infallible for valid CiPipeline") +} + +// ── Builder ─────────────────────────────────────────────────────────────────── + +fn build_workflow(pipeline: &CiPipeline) -> GithubWorkflow { + GithubWorkflow { + name: "CI".to_string(), + on: build_on(&pipeline.triggers), + jobs: Jobs { + ci: Job { + runs_on: "ubuntu-latest".to_string(), + steps: build_steps(pipeline), + }, + }, + } +} + +fn build_on(triggers: &crate::generator::ci_generation::schema::TriggerConfig) -> WorkflowOn { + WorkflowOn { + push: PushTrigger { + branches: triggers.push_branches.clone(), + tags: triggers.tag_pattern.as_ref().map(|p| vec![p.clone()]), + }, + pull_request: PrTrigger { + branches: triggers.pr_branches.clone(), + }, + schedule: triggers.scheduled.as_ref().map(|cron| { + vec![CronEntry { cron: cron.clone() }] + }), + } +} + +fn build_steps(pipeline: &CiPipeline) -> Vec { + let mut steps: Vec = Vec::new(); + + // 1. 
Checkout + steps.push(Step { uses: Some("actions/checkout@v4".to_string()), ..Default::default() }); + + // 2. Runtime setup + let mut runtime_with = BTreeMap::new(); + runtime_with.insert( + runtime_version_key(&pipeline.runtime.action).to_string(), + pipeline.runtime.version.clone(), + ); + steps.push(Step { + name: Some("Set up runtime".to_string()), + uses: Some(pipeline.runtime.action.clone()), + with: Some(runtime_with), + ..Default::default() + }); + + // 3. Cache (optional) + if let Some(cache) = &pipeline.cache { + let mut w = BTreeMap::new(); + w.insert("path".to_string(), cache.paths.join("\n")); + w.insert("key".to_string(), cache.key.clone()); + if !cache.restore_keys.is_empty() { + w.insert("restore-keys".to_string(), cache.restore_keys.join("\n")); + } + steps.push(Step { + name: Some("Cache dependencies".to_string()), + uses: Some("actions/cache@v4".to_string()), + with: Some(w), + ..Default::default() + }); + } + + // 4. Install + steps.push(Step { + name: Some("Install dependencies".to_string()), + run: Some(pipeline.install.command.clone()), + ..Default::default() + }); + + // 5. Lint (optional) + if let Some(lint) = &pipeline.lint { + steps.push(Step { + name: Some("Lint".to_string()), + run: Some(lint.command.clone()), + ..Default::default() + }); + } + + // 6. Test + let test_cmd = match &pipeline.test.coverage_flag { + Some(flag) => format!("{} {}", pipeline.test.command, flag), + None => pipeline.test.command.clone(), + }; + steps.push(Step { + name: Some("Test".to_string()), + run: Some(test_cmd), + ..Default::default() + }); + + // 7. Build (optional) + if let Some(build) = &pipeline.build { + steps.push(Step { + name: Some("Build".to_string()), + run: Some(build.command.clone()), + ..Default::default() + }); + } + + // 8. 
Docker steps (optional) + if let Some(docker) = &pipeline.docker_build { + if docker.qemu { + steps.push(Step { + uses: Some("docker/setup-qemu-action@v3".to_string()), + ..Default::default() + }); + } + if docker.buildx { + steps.push(Step { + uses: Some("docker/setup-buildx-action@v3".to_string()), + ..Default::default() + }); + } + steps.push(Step { + name: Some("Build Docker image".to_string()), + run: Some(format!("docker build -t {} .", docker.image_tag)), + ..Default::default() + }); + if docker.push { + steps.push(Step { + name: Some("Push Docker image".to_string()), + run: Some(format!("docker push {}", docker.image_tag)), + ..Default::default() + }); + } + } + + // 9. Image scan (optional) + if let Some(scan) = &pipeline.image_scan { + let mut w = BTreeMap::new(); + w.insert("exit-code".to_string(), "1".to_string()); + w.insert("format".to_string(), scan.format.clone()); + w.insert("image-ref".to_string(), scan.image_ref.clone()); + w.insert("output".to_string(), scan.output.clone()); + w.insert("severity".to_string(), scan.fail_on_severity.clone()); + steps.push(Step { + uses: Some("aquasecurity/trivy-action@master".to_string()), + with: Some(w), + ..Default::default() + }); + + if scan.upload_sarif { + let mut w = BTreeMap::new(); + w.insert("sarif_file".to_string(), scan.output.clone()); + steps.push(Step { + uses: Some("github/codeql-action/upload-sarif@v3".to_string()), + with: Some(w), + ..Default::default() + }); + } + } + + // 10. Secret scan (always) + let mut sec_env = BTreeMap::new(); + sec_env.insert("GITHUB_TOKEN".to_string(), pipeline.secret_scan.github_token_expr.clone()); + if let Some(license) = &pipeline.secret_scan.gitleaks_license_secret { + sec_env.insert( + "GITLEAKS_LICENSE".to_string(), + format!("${{{{ secrets.{} }}}}", license), + ); + } + steps.push(Step { + uses: Some("gitleaks/gitleaks-action@v2".to_string()), + env: Some(sec_env), + ..Default::default() + }); + + // 11. 
/// Derives the `with:` key name for the runtime version from the action string.
fn runtime_version_key(action: &str) -> &'static str {
    const KEYS: [(&str, &str); 5] = [
        ("setup-node", "node-version"),
        ("setup-python", "python-version"),
        ("setup-go", "go-version"),
        ("setup-java", "java-version"),
        ("rust-toolchain", "toolchain"),
    ];
    // Checked in declaration order, matching the original if/else chain.
    for (needle, key) in KEYS {
        if action.contains(needle) {
            return key;
        }
    }
    // Unknown runtime action: fall back to a generic key name.
    "version"
}
secrets.GITHUB_TOKEN }}".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } + } + + #[test] + fn test_render_produces_valid_yaml() { + let output = render(&make_pipeline()); + let parsed: Result = serde_yaml::from_str(&output); + assert!(parsed.is_ok(), "render output must be valid YAML:\n{output}"); + } + + #[test] + fn test_render_contains_checkout_step() { + let output = render(&make_pipeline()); + assert!(output.contains("actions/checkout@v4")); + } + + #[test] + fn test_render_job_runs_on_ubuntu() { + let output = render(&make_pipeline()); + assert!(output.contains("ubuntu-latest")); + } + + #[test] + fn test_render_workflow_name_is_ci() { + let output = render(&make_pipeline()); + assert!(output.contains("name: CI")); + } + + #[test] + fn test_push_branches_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("main")); + } + + #[test] + fn test_runtime_action_and_version_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("actions/setup-node@v4")); + assert!(output.contains("node-version")); + assert!(output.contains("'20'") || output.contains("\"20\"") || output.contains("20")); + } + + #[test] + fn test_lint_step_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("Lint")); + } + + #[test] + fn test_lint_step_present_when_some() { + let mut p = make_pipeline(); + p.lint = Some(LintStep { command: "cargo clippy -- -D warnings".to_string() }); + let output = render(&p); + assert!(output.contains("cargo clippy -- -D warnings")); + } + + #[test] + fn test_test_command_emitted() { + let output = render(&make_pipeline()); + assert!(output.contains("npx jest")); + } + + #[test] + fn test_coverage_flag_appended_to_test_command() { + let mut p = make_pipeline(); + p.test.coverage_flag = Some("--coverage".to_string()); + let output = render(&p); + assert!(output.contains("npx jest --coverage")); + } + + #[test] + fn 
test_build_step_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("Build\n") && !output.contains("name: Build")); + } + + #[test] + fn test_build_step_present_when_some() { + let mut p = make_pipeline(); + p.build = Some(BuildStep { command: "cargo build --release".to_string(), artifact_path: None }); + let output = render(&p); + assert!(output.contains("cargo build --release")); + } + + #[test] + fn test_docker_steps_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("docker")); + } + + #[test] + fn test_docker_buildx_step_emitted() { + let mut p = make_pipeline(); + p.docker_build = Some(DockerBuildStep { + image_tag: "ghcr.io/org/app:sha".to_string(), + push: false, + qemu: false, + buildx: true, + }); + let output = render(&p); + assert!(output.contains("docker/setup-buildx-action@v3")); + assert!(output.contains("docker build")); + } + + #[test] + fn test_image_scan_omitted_when_none() { + let output = render(&make_pipeline()); + assert!(!output.contains("trivy-action")); + } + + #[test] + fn test_image_scan_step_emitted() { + let mut p = make_pipeline(); + p.docker_build = Some(DockerBuildStep { + image_tag: "ghcr.io/org/app:sha".to_string(), + push: false, qemu: false, buildx: true, + }); + p.image_scan = Some(ImageScanStep { + image_ref: "ghcr.io/org/app:sha".to_string(), + fail_on_severity: "CRITICAL,HIGH".to_string(), + format: "sarif".to_string(), + output: "trivy-results.sarif".to_string(), + upload_sarif: true, + }); + let output = render(&p); + assert!(output.contains("aquasecurity/trivy-action@master")); + assert!(output.contains("github/codeql-action/upload-sarif@v3")); + } + + #[test] + fn test_secret_scan_always_present() { + let output = render(&make_pipeline()); + assert!(output.contains("gitleaks/gitleaks-action@v2")); + assert!(output.contains("GITHUB_TOKEN")); + } + + #[test] + fn test_artifact_upload_emitted_when_some() { + let mut p = make_pipeline(); + 
p.upload_artifact = Some(ArtifactStep { + name: "build-output".to_string(), + path: "dist/**".to_string(), + }); + let output = render(&p); + assert!(output.contains("actions/upload-artifact@v4")); + assert!(output.contains("dist/**")); + } + + #[test] + fn test_scheduled_trigger_emitted() { + let mut p = make_pipeline(); + p.triggers.scheduled = Some("0 3 * * 1".to_string()); + let output = render(&p); + assert!(output.contains("schedule")); + assert!(output.contains("0 3 * * 1")); + } + + #[test] + fn test_tag_pattern_emitted_in_push_trigger() { + let mut p = make_pipeline(); + p.triggers.tag_pattern = Some("v*".to_string()); + let output = render(&p); + assert!(output.contains("tags")); + assert!(output.contains("v*")); + } + + #[test] + fn test_cache_step_emitted_when_some() { + let mut p = make_pipeline(); + p.cache = Some(CacheStep { + paths: vec!["~/.npm".to_string()], + key: "${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}".to_string(), + restore_keys: vec!["${{ runner.os }}-npm-".to_string()], + }); + let output = render(&p); + assert!(output.contains("actions/cache@v4")); + assert!(output.contains("~/.npm")); + } +} diff --git a/src/generator/ci_generation/templates/mod.rs b/src/generator/ci_generation/templates/mod.rs new file mode 100644 index 00000000..d632d414 --- /dev/null +++ b/src/generator/ci_generation/templates/mod.rs @@ -0,0 +1,12 @@ +//! CI/CD Template Builders +//! +//! Each submodule assembles a final YAML file for a specific platform +//! by rendering a `CiPipeline` schema into the target format. +//! +//! - `github_actions` — `.github/workflows/ci.yml` (CI-11) +//! - `azure_pipelines` — `azure-pipelines.yml` (CI-12) +//! 
- `cloud_build` — `cloudbuild.yaml` (CI-13) + +pub mod azure_pipelines; +pub mod cloud_build; +pub mod github_actions; diff --git a/src/generator/ci_generation/test_helpers.rs b/src/generator/ci_generation/test_helpers.rs new file mode 100644 index 00000000..24982c6e --- /dev/null +++ b/src/generator/ci_generation/test_helpers.rs @@ -0,0 +1,71 @@ +//! Shared test helpers for CI generation unit tests. + +use std::collections::HashMap; +use std::path::Path; + +use crate::analyzer::{AnalysisMetadata, ProjectAnalysis}; +use crate::cli::{CiFormat, CiPlatform}; +use crate::generator::ci_generation::context::{CiContext, PackageManager}; + +/// Constructs a minimal `CiContext` with all defaults for use in unit tests. +/// +/// Fields that matter for the test under hand should be overridden by the +/// caller after construction. Using struct-update syntax is idiomatic: +/// +/// ```ignore +/// let ctx = make_base_ctx(dir.path(), "TypeScript"); +/// let ctx = CiContext { package_manager: PackageManager::Npm, ..ctx }; +/// ``` +#[allow(deprecated)] +pub fn make_base_ctx(root: &Path, primary_language: &str) -> CiContext { + CiContext { + analysis: ProjectAnalysis { + project_root: root.to_path_buf(), + languages: vec![], + technologies: vec![], + frameworks: vec![], + dependencies: Default::default(), + entry_points: vec![], + ports: vec![], + health_endpoints: vec![], + environment_variables: vec![], + project_type: crate::analyzer::ProjectType::Unknown, + build_scripts: vec![], + services: vec![], + architecture_type: crate::analyzer::ArchitectureType::Monolithic, + docker_analysis: None, + infrastructure: None, + analysis_metadata: AnalysisMetadata { + timestamp: String::new(), + analyzer_version: String::new(), + analysis_duration_ms: 0, + files_analyzed: 0, + confidence_score: 0.0, + }, + }, + primary_language: primary_language.to_string(), + runtime_versions: HashMap::new(), + package_manager: PackageManager::Unknown, + lock_file: None, + test_framework: None, + linter: 
None, + build_command: None, + has_dockerfile: false, + monorepo: false, + monorepo_packages: vec![], + default_branch: "main".to_string(), + platform: CiPlatform::Gcp, + format: CiFormat::GithubActions, + project_name: "test-project".to_string(), + config_test_command: None, + env_prefix: None, + skip_steps: vec![], + extra_branches: vec![], + } +} + +/// Alias used by CI-22 tests. +pub fn make_minimal_context() -> CiContext { + use std::path::PathBuf; + make_base_ctx(&PathBuf::from("/tmp/test"), "Rust") +} diff --git a/src/generator/ci_generation/test_step.rs b/src/generator/ci_generation/test_step.rs new file mode 100644 index 00000000..1e54c19c --- /dev/null +++ b/src/generator/ci_generation/test_step.rs @@ -0,0 +1,156 @@ +//! Test Step Generator — CI-05 +//! +//! Maps the detected `TestFramework` to the correct `TestStep` command +//! and optional coverage flags. Unknown or absent framework → placeholder token. + +use crate::generator::ci_generation::{context::{CiContext, TestFramework}, schema::TestStep}; + +/// Generates the test invocation step from the project's detected test framework. +/// +/// Every `TestFramework` variant maps to a specific command, optional coverage +/// flag, and optional coverage report path. `None` or `Unknown` → placeholder +/// so the pipeline is still valid YAML that the user can fill in. 
+pub fn generate_test_step(ctx: &CiContext) -> TestStep { + match &ctx.test_framework { + Some(TestFramework::Jest) => TestStep { + command: "npx jest".to_string(), + coverage_flag: Some("--coverage".to_string()), + coverage_report_path: Some("coverage/lcov.info".to_string()), + }, + Some(TestFramework::Vitest) => TestStep { + command: "npx vitest run".to_string(), + coverage_flag: Some("--coverage".to_string()), + coverage_report_path: Some("coverage/lcov.info".to_string()), + }, + Some(TestFramework::Mocha) => TestStep { + command: "npx mocha".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + Some(TestFramework::Pytest) => TestStep { + command: "pytest".to_string(), + coverage_flag: Some("--cov=. --cov-report=xml".to_string()), + coverage_report_path: Some("coverage.xml".to_string()), + }, + Some(TestFramework::CargoTest) => TestStep { + command: "cargo test".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + Some(TestFramework::GoTest) => TestStep { + command: "go test ./...".to_string(), + coverage_flag: Some("-coverprofile=coverage.out".to_string()), + coverage_report_path: Some("coverage.out".to_string()), + }, + Some(TestFramework::JunitMaven) => TestStep { + command: "mvn test".to_string(), + coverage_flag: None, + coverage_report_path: Some("target/surefire-reports".to_string()), + }, + Some(TestFramework::JunitGradle) => TestStep { + command: "./gradlew test".to_string(), + coverage_flag: None, + coverage_report_path: Some("build/reports/tests".to_string()), + }, + Some(TestFramework::Unknown) | None => TestStep { + command: "{{TEST_COMMAND}}".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::{context::CiContext, test_helpers::make_base_ctx}; + use tempfile::TempDir; + + fn ctx_with_framework(tf: Option) -> 
(CiContext, TempDir) { + let dir = TempDir::new().unwrap(); + let ctx = CiContext { test_framework: tf, ..make_base_ctx(dir.path(), "") }; + (ctx, dir) + } + + #[test] + fn test_jest_command_and_coverage() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::Jest)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "npx jest"); + assert_eq!(step.coverage_flag.as_deref(), Some("--coverage")); + assert_eq!(step.coverage_report_path.as_deref(), Some("coverage/lcov.info")); + } + + #[test] + fn test_vitest_command_and_coverage() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::Vitest)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "npx vitest run"); + assert_eq!(step.coverage_flag.as_deref(), Some("--coverage")); + } + + #[test] + fn test_mocha_no_coverage() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::Mocha)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "npx mocha"); + assert!(step.coverage_flag.is_none()); + } + + #[test] + fn test_pytest_coverage_xml() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::Pytest)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "pytest"); + assert!(step.coverage_flag.unwrap().contains("--cov")); + assert_eq!(step.coverage_report_path.as_deref(), Some("coverage.xml")); + } + + #[test] + fn test_cargo_test_no_coverage_flag() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::CargoTest)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "cargo test"); + assert!(step.coverage_flag.is_none()); + } + + #[test] + fn test_go_test_coverage_profile() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::GoTest)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "go test ./..."); + assert_eq!(step.coverage_flag.as_deref(), Some("-coverprofile=coverage.out")); + } + + #[test] + fn test_junit_maven_surefire_report() { + let (ctx, _dir) = 
ctx_with_framework(Some(TestFramework::JunitMaven)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "mvn test"); + assert!(step.coverage_report_path.as_deref().unwrap().contains("surefire")); + } + + #[test] + fn test_junit_gradle_jacoco_report() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::JunitGradle)); + let step = generate_test_step(&ctx); + assert_eq!(step.command, "./gradlew test"); + assert!(step.coverage_report_path.as_deref().unwrap().contains("build/reports")); + } + + #[test] + fn test_unknown_framework_yields_placeholder() { + let (ctx, _dir) = ctx_with_framework(Some(TestFramework::Unknown)); + let step = generate_test_step(&ctx); + assert!(step.command.contains("{{TEST_COMMAND}}")); + } + + #[test] + fn test_no_framework_yields_placeholder() { + let (ctx, _dir) = ctx_with_framework(None); + let step = generate_test_step(&ctx); + assert!(step.command.contains("{{TEST_COMMAND}}")); + } +} diff --git a/src/generator/ci_generation/token_resolver.rs b/src/generator/ci_generation/token_resolver.rs new file mode 100644 index 00000000..1862920e --- /dev/null +++ b/src/generator/ci_generation/token_resolver.rs @@ -0,0 +1,343 @@ +//! Placeholder Token Resolution Engine — CI-15 +//! +//! Two-pass strategy: +//! 1. **Deterministic pass** — replaces `{{TOKEN_NAME}}` in String fields +//! when the value can be derived unambiguously from `CiContext`. +//! 2. **Placeholder pass** — any remaining `{{TOKEN_NAME}}` pattern becomes +//! an `UnresolvedToken` in `pipeline.unresolved_tokens`. +//! +//! `write_manifest` serialises both maps to `ci-manifest.toml` for the agent +//! fill phase and interactive prompts. + +use std::collections::HashMap; +use std::path::Path; + +use regex::Regex; +use serde::Serialize; + +use crate::error::{GeneratorError, IaCGeneratorError}; +use crate::generator::ci_generation::{ + context::CiContext, + schema::{CiPipeline, UnresolvedToken}, +}; + +/// A map from `TOKEN_NAME` to its resolved value. 
+pub type ResolvedTokenMap = HashMap; + +/// Runs the two-pass resolution engine on `pipeline` in place. +/// +/// Returns the map of resolved tokens; callers pass this to `write_manifest`. +pub fn resolve_tokens(ctx: &CiContext, pipeline: &mut CiPipeline) -> ResolvedTokenMap { + let resolved = build_resolved_map(ctx); + // Compile once; reused across every field visit. + let re = Regex::new(r"\{\{([A-Z][A-Z0-9_]*)\}\}").expect("static regex is valid"); + apply_to_pipeline(pipeline, &resolved, &re); + resolved +} + +/// Writes the resolved and unresolved token inventories to `ci-manifest.toml`. +pub fn write_manifest( + resolved: &ResolvedTokenMap, + unresolved: &[UnresolvedToken], + dest: &Path, +) -> crate::Result<()> { + #[derive(Serialize)] + struct Entry { + #[serde(rename = "type")] + token_type: String, + hint: String, + } + + #[derive(Serialize)] + struct Manifest { + resolved: HashMap, + unresolved: HashMap, + } + + let manifest = Manifest { + resolved: resolved.clone(), + unresolved: unresolved + .iter() + .map(|u| { + ( + u.name.clone(), + Entry { token_type: u.token_type.clone(), hint: u.hint.clone() }, + ) + }) + .collect(), + }; + + let content = toml::to_string_pretty(&manifest) + .map_err(|e| IaCGeneratorError::Generation(GeneratorError::InvalidContext(e.to_string())))?; + + std::fs::write(dest, content)?; + Ok(()) +} + +// ── Private helpers ─────────────────────────────────────────────────────────── + +/// Builds the deterministic token map from `ctx`. +fn build_resolved_map(ctx: &CiContext) -> ResolvedTokenMap { + let mut map = HashMap::new(); + map.insert("PROJECT_NAME".to_string(), ctx.project_name.clone()); + if let Some(version) = ctx.runtime_versions.get(&ctx.primary_language) { + map.insert("RUNTIME_VERSION".to_string(), version.clone()); + } + map +} + +/// Visits every String field in `pipeline` that may carry a `{{TOKEN}}` and +/// applies both resolution passes. 
+fn apply_to_pipeline(pipeline: &mut CiPipeline, resolved: &ResolvedTokenMap, re: &Regex) { + let acc = &mut pipeline.unresolved_tokens; + + resolve_str(&mut pipeline.project_name, resolved, re, acc); + + resolve_str(&mut pipeline.runtime.version, resolved, re, acc); + + if let Some(cache) = &mut pipeline.cache { + for path in &mut cache.paths { + resolve_str(path, resolved, re, acc); + } + resolve_str(&mut cache.key, resolved, re, acc); + for key in &mut cache.restore_keys { + resolve_str(key, resolved, re, acc); + } + } + + resolve_str(&mut pipeline.install.command, resolved, re, acc); + + if let Some(lint) = &mut pipeline.lint { + resolve_str(&mut lint.command, resolved, re, acc); + } + + resolve_str(&mut pipeline.test.command, resolved, re, acc); + + if let Some(build) = &mut pipeline.build { + resolve_str(&mut build.command, resolved, re, acc); + } + + if let Some(docker) = &mut pipeline.docker_build { + resolve_str(&mut docker.image_tag, resolved, re, acc); + } + + if let Some(scan) = &mut pipeline.image_scan { + resolve_str(&mut scan.image_ref, resolved, re, acc); + } + + if let Some(artifact) = &mut pipeline.upload_artifact { + resolve_str(&mut artifact.name, resolved, re, acc); + resolve_str(&mut artifact.path, resolved, re, acc); + } +} + +/// Resolves known tokens and collects unknown ones from a single String field. 
+fn resolve_str( + field: &mut String, + resolved: &ResolvedTokenMap, + re: &Regex, + acc: &mut Vec, +) { + for (name, value) in resolved { + let placeholder = format!("{{{{{}}}}}", name); + if field.contains(&placeholder) { + *field = field.replace(&placeholder, value); + } + } + + let snapshot = field.clone(); + for cap in re.captures_iter(&snapshot) { + let name = cap[1].to_string(); + if !acc.iter().any(|u| u.name == name) { + acc.push(UnresolvedToken::new(&name, "Provide a value for this token", "string")); + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{CiFormat, CiPlatform}; + use crate::generator::ci_generation::{ + context::CiContext, + schema::{ + CiPipeline, InstallStep, SecretScanStep, TestStep, TriggerConfig, + }, + test_helpers::make_base_ctx, + }; + use tempfile::TempDir; + + fn make_pipeline(project_name: &str) -> CiPipeline { + CiPipeline { + project_name: project_name.to_string(), + platform: CiPlatform::Gcp, + format: CiFormat::GithubActions, + triggers: TriggerConfig { + push_branches: vec!["main".to_string()], + pr_branches: vec!["main".to_string()], + tag_pattern: None, + scheduled: None, + }, + runtime: crate::generator::ci_generation::schema::RuntimeStep { + action: "actions/setup-node@v4".to_string(), + version: "20".to_string(), + }, + cache: None, + install: InstallStep { command: "npm ci".to_string() }, + lint: None, + test: TestStep { + command: "npm test".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + build: None, + docker_build: None, + image_scan: None, + secret_scan: SecretScanStep { + github_token_expr: "${{ secrets.GITHUB_TOKEN }}".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } + } + + fn ctx_with_name(root: &std::path::Path, name: &str) -> CiContext { + CiContext { project_name: name.to_string(), ..make_base_ctx(root, "") } + } + + 
// ── Deterministic pass ──────────────────────────────────────────────────── + + #[test] + fn test_project_name_token_is_replaced() { + let dir = TempDir::new().unwrap(); + let ctx = ctx_with_name(dir.path(), "my-app"); + let mut pipeline = make_pipeline("{{PROJECT_NAME}}"); + + let resolved = resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.project_name, "my-app"); + assert_eq!(resolved.get("PROJECT_NAME").map(|s| s.as_str()), Some("my-app")); + } + + #[test] + fn test_runtime_version_token_is_replaced() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "Node.js"); + ctx.runtime_versions.insert("Node.js".to_string(), "20".to_string()); + + let mut pipeline = make_pipeline("proj"); + pipeline.runtime.version = "{{RUNTIME_VERSION}}".to_string(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.runtime.version, "20"); + assert!(pipeline.unresolved_tokens.is_empty()); + } + + #[test] + fn test_no_version_in_context_leaves_token_unresolved() { + let dir = TempDir::new().unwrap(); + let ctx = make_base_ctx(dir.path(), "Node.js"); // no runtime_versions + + let mut pipeline = make_pipeline("proj"); + pipeline.runtime.version = "{{RUNTIME_VERSION}}".to_string(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.runtime.version, "{{RUNTIME_VERSION}}"); + assert_eq!(pipeline.unresolved_tokens.len(), 1); + assert_eq!(pipeline.unresolved_tokens[0].name, "RUNTIME_VERSION"); + } + + // ── Placeholder pass ────────────────────────────────────────────────────── + + #[test] + fn test_unknown_token_becomes_unresolved_entry() { + let dir = TempDir::new().unwrap(); + let ctx = make_base_ctx(dir.path(), ""); + + let mut pipeline = make_pipeline("proj"); + pipeline.docker_build = Some(crate::generator::ci_generation::schema::DockerBuildStep { + image_tag: "{{REGISTRY_URL}}/my-app:latest".to_string(), + push: true, + qemu: false, + buildx: true, + }); + + resolve_tokens(&ctx, &mut pipeline); + + 
assert_eq!(pipeline.unresolved_tokens.len(), 1); + assert_eq!(pipeline.unresolved_tokens[0].name, "REGISTRY_URL"); + assert_eq!( + pipeline.unresolved_tokens[0].placeholder, + "{{REGISTRY_URL}}" + ); + } + + #[test] + fn test_duplicate_tokens_deduplicated() { + let dir = TempDir::new().unwrap(); + let ctx = make_base_ctx(dir.path(), ""); + + let mut pipeline = make_pipeline("proj"); + pipeline.docker_build = Some(crate::generator::ci_generation::schema::DockerBuildStep { + image_tag: "{{REGISTRY_URL}}/app:tag".to_string(), + push: true, + qemu: false, + buildx: true, + }); + pipeline.image_scan = Some(crate::generator::ci_generation::schema::ImageScanStep { + image_ref: "{{REGISTRY_URL}}/app:tag".to_string(), + fail_on_severity: "HIGH".to_string(), + format: "sarif".to_string(), + output: "trivy-results.sarif".to_string(), + upload_sarif: true, + }); + + resolve_tokens(&ctx, &mut pipeline); + + let registry_tokens: Vec<_> = pipeline + .unresolved_tokens + .iter() + .filter(|u| u.name == "REGISTRY_URL") + .collect(); + assert_eq!(registry_tokens.len(), 1, "REGISTRY_URL should not be duplicated"); + } + + // ── Manifest writing ────────────────────────────────────────────────────── + + #[test] + fn test_write_manifest_produces_valid_toml() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("ci-manifest.toml"); + + let mut resolved = ResolvedTokenMap::new(); + resolved.insert("PROJECT_NAME".to_string(), "my-app".to_string()); + + let unresolved = vec![UnresolvedToken::new("REGISTRY_URL", "Container registry", "url")]; + + write_manifest(&resolved, &unresolved, &dest).expect("write_manifest failed"); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("PROJECT_NAME")); + assert!(content.contains("my-app")); + assert!(content.contains("REGISTRY_URL")); + } + + #[test] + fn test_write_manifest_empty_unresolved() { + let dir = TempDir::new().unwrap(); + let dest = dir.path().join("ci-manifest.toml"); + + let mut resolved 
= ResolvedTokenMap::new(); + resolved.insert("PROJECT_NAME".to_string(), "clean-app".to_string()); + + write_manifest(&resolved, &[], &dest).expect("write_manifest failed"); + + let content = std::fs::read_to_string(&dest).unwrap(); + assert!(content.contains("clean-app")); + } +} diff --git a/src/generator/ci_generation/triggers.rs b/src/generator/ci_generation/triggers.rs new file mode 100644 index 00000000..4933d669 --- /dev/null +++ b/src/generator/ci_generation/triggers.rs @@ -0,0 +1,132 @@ +//! CI Trigger Configuration — CI-18 +//! +//! Resolves `TriggerConfig` from the project's default branch and an optional +//! semver tag pattern detected in the repository's git history. + +use std::path::Path; +use std::process::Command; + +use crate::generator::ci_generation::{context::CiContext, schema::TriggerConfig}; + +/// Resolves the trigger configuration for a CI pipeline. +/// +/// Both push and PR triggers default to the project's detected default branch. +/// If the repository contains any tags matching the glob `v*`, the tag trigger +/// is enabled so release workflows fire automatically on versioned tags. +pub fn resolve_triggers(ctx: &CiContext) -> TriggerConfig { + let root = &ctx.analysis.project_root; + let branch = ctx.default_branch.clone(); + + TriggerConfig { + push_branches: vec![branch.clone()], + pr_branches: vec![branch], + tag_pattern: detect_semver_tag_pattern(root), + scheduled: Some("{{CRON_SCHEDULE}}".to_string()), + } +} + +/// Returns `Some("v*")` when the repo at `path` has at least one `v*` tag. +/// Returns `None` on any git error or if no such tags exist. 
+fn detect_semver_tag_pattern(path: &Path) -> Option { + let output = Command::new("git") + .args(["tag", "--list", "v*"]) + .current_dir(path) + .output() + .ok()?; + + if output.status.success() && !String::from_utf8_lossy(&output.stdout).trim().is_empty() { + Some("v*".to_string()) + } else { + None + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::generator::ci_generation::{context::CiContext, test_helpers::make_base_ctx}; + use tempfile::TempDir; + + fn ctx_on(root: &std::path::Path, branch: &str) -> CiContext { + CiContext { default_branch: branch.to_string(), ..make_base_ctx(root, "rust") } + } + + #[test] + fn test_push_and_pr_branches_match_default_branch() { + let dir = TempDir::new().unwrap(); + let triggers = resolve_triggers(&ctx_on(dir.path(), "develop")); + + assert_eq!(triggers.push_branches, vec!["develop"]); + assert_eq!(triggers.pr_branches, vec!["develop"]); + } + + #[test] + fn test_scheduled_emits_cron_placeholder() { + let dir = TempDir::new().unwrap(); + let triggers = resolve_triggers(&ctx_on(dir.path(), "main")); + assert_eq!(triggers.scheduled, Some("{{CRON_SCHEDULE}}".to_string())); + } + + #[test] + fn test_no_git_repo_yields_no_tag_pattern() { + let dir = TempDir::new().unwrap(); + // Plain temp dir — git command fails → tag_pattern is None. + let triggers = resolve_triggers(&ctx_on(dir.path(), "main")); + assert!(triggers.tag_pattern.is_none()); + } + + #[test] + fn test_semver_tag_detected() { + let dir = TempDir::new().unwrap(); + let root = dir.path(); + + // Bootstrap a minimal git repo with a semver tag. 
+ let git = |args: &[&str]| { + Command::new("git") + .args(args) + .current_dir(root) + .env("GIT_AUTHOR_NAME", "ci-test") + .env("GIT_AUTHOR_EMAIL", "ci@test.local") + .env("GIT_COMMITTER_NAME", "ci-test") + .env("GIT_COMMITTER_EMAIL", "ci@test.local") + .env("GIT_CONFIG_NOSYSTEM", "1") + .output() + .expect("git command failed") + }; + + git(&["init"]); + git(&["commit", "--allow-empty", "-m", "init"]); + git(&["tag", "v1.0.0"]); + + let triggers = resolve_triggers(&ctx_on(root, "main")); + assert_eq!(triggers.tag_pattern, Some("v*".to_string())); + } + + #[test] + fn test_non_semver_tags_yield_no_tag_pattern() { + let dir = TempDir::new().unwrap(); + let root = dir.path(); + + let git = |args: &[&str]| { + Command::new("git") + .args(args) + .current_dir(root) + .env("GIT_AUTHOR_NAME", "ci-test") + .env("GIT_AUTHOR_EMAIL", "ci@test.local") + .env("GIT_COMMITTER_NAME", "ci-test") + .env("GIT_COMMITTER_EMAIL", "ci@test.local") + .env("GIT_CONFIG_NOSYSTEM", "1") + .output() + .expect("git command failed") + }; + + git(&["init"]); + git(&["commit", "--allow-empty", "-m", "init"]); + git(&["tag", "release-1.0"]); // no "v" prefix — should not match + + let triggers = resolve_triggers(&ctx_on(root, "main")); + assert!(triggers.tag_pattern.is_none()); + } +} diff --git a/src/generator/ci_generation/writer.rs b/src/generator/ci_generation/writer.rs new file mode 100644 index 00000000..0a8d9aaf --- /dev/null +++ b/src/generator/ci_generation/writer.rs @@ -0,0 +1,674 @@ +//! CI-20 — CI File Writer & Conflict Detection +//! +//! Writes generated CI files to the correct platform-specific paths. +//! Before writing each file the writer: +//! +//! 1. Validates the content is parseable YAML via a `serde_yaml` round-trip. +//! 2. Checks whether the target path already exists. +//! 3. If it exists and content differs, records a conflict with a unified diff. +//! The caller decides whether to overwrite (pass `force = true`) or skip. +//! +//! ## Output paths by format +//! +//! 
| Format | Path written | +//! |------------------|--------------------------------------| +//! | GitHub Actions | `.github/workflows/ci.yml` | +//! | Azure Pipelines | `azure-pipelines.yml` | +//! | Cloud Build | `cloudbuild.yaml` | +//! | Secrets doc | `.syncable/SECRETS_REQUIRED.md` | +//! +//! `write_ci_files` always writes all files for which content was provided; +//! callers build the `Vec` from the `CiPipeline` they assembled. +//! A `WriteSummary` is returned so the CLI can display a results table. + +use std::io::BufRead; +use std::path::{Path, PathBuf}; + +use similar::{ChangeTag, TextDiff}; + +use crate::cli::CiFormat; + +// ── Public types ────────────────────────────────────────────────────────────── + +/// Classifies the kind of file being written — used for display and path resolution. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CiFileKind { + /// Main pipeline YAML (`.github/workflows/ci.yml`, `azure-pipelines.yml`, etc.) + Pipeline(CiFormat), + /// `.syncable/SECRETS_REQUIRED.md` + SecretsDoc, + /// Any other file with an explicit relative path. + Other(String), +} + +/// A generated file ready to be written. +#[derive(Debug, Clone)] +pub struct CiFile { + /// Content string (YAML or Markdown depending on kind). + pub content: String, + /// What kind of file this is — drives path resolution. + pub kind: CiFileKind, +} + +impl CiFile { + /// Constructs a pipeline YAML file for the given format. + pub fn pipeline(content: String, format: CiFormat) -> Self { + Self { content, kind: CiFileKind::Pipeline(format) } + } + + /// Constructs a secrets documentation file. + pub fn secrets_doc(content: String) -> Self { + Self { content, kind: CiFileKind::SecretsDoc } + } +} + +/// User's chosen resolution when a conflict is detected during interactive mode. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ConflictResolution { + /// Replace the existing file with the generated content. 
+ Overwrite, + /// Write both versions into the file using git-style conflict markers. + Merge, + /// Leave the existing file unchanged. + Skip, +} + +/// Result of writing a single file. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum WriteOutcome { + /// File did not exist; was created. + Created, + /// File existed and was identical — no write needed. + Unchanged, + /// File existed with different content and `force = true` → overwritten. + Overwritten, + /// File existed with different content and `force = false` → not written. + Skipped, + /// File written with git-style conflict markers for manual resolution. + Merged, + /// Generated content failed the YAML validation round-trip. + InvalidYaml(String), +} + +/// Per-file result entry in `WriteSummary`. +#[derive(Debug, Clone)] +pub struct FileResult { + /// The resolved absolute path that was (or would have been) written. + pub path: PathBuf, + pub outcome: WriteOutcome, + /// Unified diff when `outcome == Overwritten | Skipped` and content differs. + pub diff: Option, +} + +/// Aggregated result returned by `write_ci_files`. +#[derive(Debug, Clone, Default)] +pub struct WriteSummary { + pub results: Vec, +} + +impl WriteSummary { + pub fn created(&self) -> usize { + self.results.iter().filter(|r| r.outcome == WriteOutcome::Created).count() + } + pub fn overwritten(&self) -> usize { + self.results.iter().filter(|r| r.outcome == WriteOutcome::Overwritten).count() + } + pub fn skipped(&self) -> usize { + self.results.iter().filter(|r| r.outcome == WriteOutcome::Skipped).count() + } + pub fn invalid(&self) -> usize { + self.results.iter().filter(|r| matches!(r.outcome, WriteOutcome::InvalidYaml(_))).count() + } + pub fn merged(&self) -> usize { + self.results.iter().filter(|r| r.outcome == WriteOutcome::Merged).count() + } + pub fn has_conflicts(&self) -> bool { + self.results.iter().any(|r| r.outcome == WriteOutcome::Skipped) + } + + /// Returns a human-readable single-line summary. 
+    pub fn display_line(&self) -> String {
+        format!(
+            "{} created, {} overwritten, {} merged, {} skipped, {} invalid",
+            self.created(),
+            self.overwritten(),
+            self.merged(),
+            self.skipped(),
+            self.invalid(),
+        )
+    }
+}
+
+// ── Public API ──────────────────────────────────────────────────────────────
+
+/// Writes `files` into `output_dir`, respecting the `force` flag.
+///
+/// `force = true` — overwrite any existing files without prompting.
+/// `force = false` — skip files that differ from their existing on-disk version
+/// and record them as `Skipped` with a diff in the summary.
+///
+/// Callers that need interactive conflict resolution should inspect
+/// `summary.has_conflicts()` and re-invoke with their chosen policy.
+pub fn write_ci_files(
+    files: &[CiFile],
+    output_dir: &Path,
+    force: bool,
+) -> crate::Result<WriteSummary> {
+    let mut summary = WriteSummary::default();
+
+    for file in files {
+        let path = resolve_path(output_dir, &file.kind);
+        let result = write_one(file, &path, force)?;
+        summary.results.push(result);
+    }
+
+    Ok(summary)
+}
+
+/// Interactive variant of `write_ci_files`.
+///
+/// Runs a first pass with `force = false` to detect conflicts, then for each
+/// `Skipped` file reads one line from `reader` to ask the user what to do:
+///
+/// - `o` → overwrite (replace existing file with generated content)
+/// - `m` → merge (write both versions with git-style conflict markers)
+/// - `s` / anything else → skip (keep existing file)
+///
+/// `reader` is generic over `BufRead` so tests can inject a cursor instead of
+/// reading from real stdin.
+pub fn write_ci_files_interactive<R: BufRead>(
+    files: &[CiFile],
+    output_dir: &Path,
+    reader: &mut R,
+) -> crate::Result<WriteSummary> {
+    let mut summary = write_ci_files(files, output_dir, false)?;
+
+    for (file, result) in files.iter().zip(summary.results.iter_mut()) {
+        if result.outcome != WriteOutcome::Skipped {
+            continue;
+        }
+        let diff = result.diff.as_deref().unwrap_or("");
+        let resolution = prompt_conflict_resolution(&result.path, diff, reader);
+        match resolution {
+            ConflictResolution::Overwrite => {
+                do_write(&result.path, &file.content)?;
+                result.outcome = WriteOutcome::Overwritten;
+            }
+            ConflictResolution::Merge => {
+                let existing = std::fs::read_to_string(&result.path)?;
+                let merged = conflict_markers(&existing, &file.content);
+                do_write(&result.path, &merged)?;
+                result.outcome = WriteOutcome::Merged;
+            }
+            ConflictResolution::Skip => {}
+        }
+    }
+
+    Ok(summary)
+}
+
+/// Reads a single conflict-resolution choice from `reader`.
+///
+/// Prints a prompt line to stderr (non-blocking in tests). Parses:
+/// - `"o"` → `Overwrite`
+/// - `"m"` → `Merge`
+/// - anything else (including `"s"`) → `Skip`
+pub fn prompt_conflict_resolution<R: BufRead>(
+    path: &Path,
+    _diff: &str,
+    reader: &mut R,
+) -> ConflictResolution {
+    eprintln!(
+        " conflict: {} [o]verwrite / [m]erge / [s]kip?",
+        path.display()
+    );
+    let mut line = String::new();
+    let _ = reader.read_line(&mut line);
+    match line.trim() {
+        "o" => ConflictResolution::Overwrite,
+        "m" => ConflictResolution::Merge,
+        _ => ConflictResolution::Skip,
+    }
+}
+
+/// Renders a formatted table summarising `WriteSummary` results.
+///
+/// Uses the box-drawing style consistent with the rest of the codebase.
+/// Returns a `String` so the caller decides when/how to print it.
+pub fn render_summary_table(summary: &WriteSummary) -> String {
+    const PATH_W: usize = 44;
+    const OUT_W: usize = 12;
+    const LINE_W: usize = PATH_W + OUT_W + 5; // borders + padding
+
+    let ruler = "─".repeat(LINE_W);
+    let mut out = String::new();
+
+    out.push_str(&format!("┌─ CI Files Written {}┐\n", "─".repeat(LINE_W - 20)));
+    // NOTE(review): the table body below was reconstructed — the original
+    // lines were destroyed by markup stripping in this patch (everything
+    // between the first `{:<` format spec and `-> PathBuf` was eaten).
+    // Layout inferred from PATH_W/OUT_W, `ruler`, `compact_path`,
+    // `outcome_label`, and the assertions in the unit tests; confirm
+    // against the pre-corruption source.
+    out.push_str(&format!(
+        "│ {:<pw$} │ {:<ow$} │\n",
+        "File",
+        "Outcome",
+        pw = PATH_W,
+        ow = OUT_W
+    ));
+    out.push_str(&format!("├{}┤\n", ruler));
+    for r in &summary.results {
+        out.push_str(&format!(
+            "│ {:<pw$} │ {:<ow$} │\n",
+            compact_path(&r.path),
+            outcome_label(&r.outcome),
+            pw = PATH_W,
+            ow = OUT_W
+        ));
+    }
+    out.push_str(&format!("└{}┘\n", ruler));
+    out
+}
+
+// ── Path resolution ─────────────────────────────────────────────────────────
+
+/// Resolves the absolute on-disk path for `kind` under `output_dir`.
+fn resolve_path(output_dir: &Path, kind: &CiFileKind) -> PathBuf {
+    match kind {
+        CiFileKind::Pipeline(fmt) => output_dir.join(pipeline_path(fmt)),
+        CiFileKind::SecretsDoc => output_dir.join(".syncable").join("SECRETS_REQUIRED.md"),
+        CiFileKind::Other(rel) => output_dir.join(rel),
+    }
+}
+
+/// Maps a `CiFormat` to the conventional relative file path.
+pub fn pipeline_path(format: &CiFormat) -> &'static str {
+    match format {
+        CiFormat::GithubActions => ".github/workflows/ci.yml",
+        CiFormat::AzurePipelines => "azure-pipelines.yml",
+        CiFormat::CloudBuild => "cloudbuild.yaml",
+    }
+}
+
+// ── Internal helpers ─────────────────────────────────────────────────────────
+
+/// Validates, diffs, and conditionally writes a single `CiFile`.
+fn write_one(file: &CiFile, path: &Path, force: bool) -> crate::Result<FileResult> {
+    // Validate YAML for pipeline files; Markdown does not need round-trip.
+    if matches!(file.kind, CiFileKind::Pipeline(_)) {
+        if let Err(e) = validate_yaml(&file.content) {
+            return Ok(FileResult {
+                path: path.to_path_buf(),
+                outcome: WriteOutcome::InvalidYaml(e),
+                diff: None,
+            });
+        }
+    }
+
+    // Check for conflict with existing file
+    if path.exists() {
+        let existing = std::fs::read_to_string(path)?;
+        if existing == file.content {
+            return Ok(FileResult {
+                path: path.to_path_buf(),
+                outcome: WriteOutcome::Unchanged,
+                diff: None,
+            });
+        }
+
+        let diff = build_diff(&existing, &file.content);
+
+        if force {
+            do_write(path, &file.content)?;
+            return Ok(FileResult {
+                path: path.to_path_buf(),
+                outcome: WriteOutcome::Overwritten,
+                diff: Some(diff),
+            });
+        } else {
+            return Ok(FileResult {
+                path: path.to_path_buf(),
+                outcome: WriteOutcome::Skipped,
+                diff: Some(diff),
+            });
+        }
+    }
+
+    // New file — create parent directories and write
+    do_write(path, &file.content)?;
+    Ok(FileResult {
+        path: path.to_path_buf(),
+        outcome: WriteOutcome::Created,
+        diff: None,
+    })
+}
+
+/// Creates parent directories and writes `content` to `path`.
+fn do_write(path: &Path, content: &str) -> crate::Result<()> {
+    if let Some(parent) = path.parent() {
+        std::fs::create_dir_all(parent)?;
+    }
+    std::fs::write(path, content)?;
+    Ok(())
+}
+
+/// Round-trips `content` through `serde_yaml` to confirm it is parseable.
+/// Returns the error message on failure.
+fn validate_yaml(content: &str) -> Result<(), String> {
+    serde_yaml::from_str::<serde_yaml::Value>(content)
+        .map(|_| ())
+        .map_err(|e| e.to_string())
+}
+
+/// Writes both `old` and `new` into a single file using git-style conflict markers.
+fn conflict_markers(old: &str, new: &str) -> String {
+    format!("<<<<<<< current\n{}=======\n{}>>>>>>> generated\n", old, new)
+}
+
+/// Returns a short human-readable label for a `WriteOutcome`.
+fn outcome_label(outcome: &WriteOutcome) -> &'static str { + match outcome { + WriteOutcome::Created => "created", + WriteOutcome::Unchanged => "unchanged", + WriteOutcome::Overwritten => "overwritten", + WriteOutcome::Skipped => "skipped", + WriteOutcome::Merged => "merged", + WriteOutcome::InvalidYaml(_) => "invalid yaml", + } +} + +/// Returns the last two components of `path` joined by `/` for compact display. +fn compact_path(path: &Path) -> String { + let parts: Vec<_> = path.components().collect(); + if parts.len() >= 2 { + let n = parts.len(); + format!("{}/{}", + parts[n - 2].as_os_str().to_string_lossy(), + parts[n - 1].as_os_str().to_string_lossy() + ) + } else { + path.display().to_string() + } +} + +/// Builds a compact unified diff for display purposes. +fn build_diff(old: &str, new: &str) -> String { + let diff = TextDiff::from_lines(old, new); + let mut out = String::new(); + for change in diff.iter_all_changes() { + let prefix = match change.tag() { + ChangeTag::Delete => "-", + ChangeTag::Insert => "+", + ChangeTag::Equal => " ", + }; + out.push_str(&format!("{}{}", prefix, change)); + } + out +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + fn tmp_dir(name: &str) -> PathBuf { + let dir = std::env::temp_dir().join(format!("syncable_writer_test_{}_{}", name, + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .subsec_nanos() + )); + std::fs::create_dir_all(&dir).unwrap(); + dir + } + + const VALID_YAML: &str = "name: CI\non:\n push:\n branches: [main]\njobs:\n test:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n"; + const INVALID_YAML: &str = "name: CI\n bad_indent:\n - key: [unclosed"; + + // ── resolve_path ─────────────────────────────────────────────────────── + + #[test] + fn test_github_actions_path() { + let p = resolve_path(Path::new("/project"), &CiFileKind::Pipeline(CiFormat::GithubActions)); + 
assert_eq!(p, PathBuf::from("/project/.github/workflows/ci.yml")); + } + + #[test] + fn test_azure_pipelines_path() { + let p = resolve_path(Path::new("/project"), &CiFileKind::Pipeline(CiFormat::AzurePipelines)); + assert_eq!(p, PathBuf::from("/project/azure-pipelines.yml")); + } + + #[test] + fn test_cloud_build_path() { + let p = resolve_path(Path::new("/project"), &CiFileKind::Pipeline(CiFormat::CloudBuild)); + assert_eq!(p, PathBuf::from("/project/cloudbuild.yaml")); + } + + #[test] + fn test_secrets_doc_path() { + let p = resolve_path(Path::new("/project"), &CiFileKind::SecretsDoc); + assert_eq!(p, PathBuf::from("/project/.syncable/SECRETS_REQUIRED.md")); + } + + // ── write_ci_files — new files ───────────────────────────────────────── + + #[test] + fn test_creates_new_pipeline_file() { + let dir = tmp_dir("new"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + assert_eq!(summary.created(), 1); + assert!(dir.join(".github/workflows/ci.yml").exists()); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_creates_parent_directories() { + let dir = tmp_dir("parents"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + assert!(dir.join(".github").join("workflows").is_dir()); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_creates_secrets_doc_file() { + let dir = tmp_dir("secrets_doc"); + let files = vec![CiFile::secrets_doc("# Secrets\n".to_string())]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + assert_eq!(summary.created(), 1); + assert!(dir.join(".syncable").join("SECRETS_REQUIRED.md").exists()); + std::fs::remove_dir_all(&dir).ok(); + } + + // ── write_ci_files — YAML validation ────────────────────────────────── + + #[test] + fn test_invalid_yaml_results_in_invalid_outcome() { + let dir = tmp_dir("invalid"); + let files = 
vec![CiFile::pipeline(INVALID_YAML.to_string(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + assert_eq!(summary.invalid(), 1); + assert_eq!(summary.created(), 0); + // File must NOT be written + assert!(!dir.join(".github/workflows/ci.yml").exists()); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_markdown_bypasses_yaml_validation() { + // SecretsDoc is Markdown — invalid YAML characters are fine + let dir = tmp_dir("md_bypass"); + let files = vec![CiFile::secrets_doc("# Secrets\n: not valid yaml but ok\n".to_string())]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + assert_eq!(summary.invalid(), 0); + assert_eq!(summary.created(), 1); + std::fs::remove_dir_all(&dir).ok(); + } + + // ── write_ci_files — conflict handling ──────────────────────────────── + + #[test] + fn test_unchanged_file_not_rewritten() { + let dir = tmp_dir("unchanged"); + // Write once + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + // Write again with identical content + let summary = write_ci_files(&files, &dir, false).unwrap(); + assert_eq!(summary.results[0].outcome, WriteOutcome::Unchanged); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_conflict_without_force_gives_skipped() { + let dir = tmp_dir("conflict_skip"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + // Write conflicting content without force + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content, CiFormat::GithubActions)]; + let summary = write_ci_files(&files2, &dir, false).unwrap(); + assert_eq!(summary.skipped(), 1); + assert!(summary.has_conflicts()); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_conflict_with_force_gives_overwritten() { + let dir = tmp_dir("conflict_force"); + let 
files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content.clone(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files2, &dir, true).unwrap(); + assert_eq!(summary.overwritten(), 1); + let written = std::fs::read_to_string(dir.join(".github/workflows/ci.yml")).unwrap(); + assert_eq!(written, new_content); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_conflict_includes_diff() { + let dir = tmp_dir("diff"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content, CiFormat::GithubActions)]; + let summary = write_ci_files(&files2, &dir, false).unwrap(); + assert!(summary.results[0].diff.is_some()); + let diff = summary.results[0].diff.as_ref().unwrap(); + assert!(diff.contains('-') || diff.contains('+')); + std::fs::remove_dir_all(&dir).ok(); + } + + // ── WriteSummary display ─────────────────────────────────────────────── + + #[test] + fn test_display_line_format() { + let dir = tmp_dir("display"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + let line = summary.display_line(); + assert!(line.contains("1 created")); + assert!(line.contains("0 skipped")); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_render_summary_table_contains_headers() { + let dir = tmp_dir("table"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, &dir, false).unwrap(); + let table = render_summary_table(&summary); + assert!(table.contains("CI Files Written")); + assert!(table.contains("File")); + 
assert!(table.contains("Outcome")); + assert!(table.contains("created")); + std::fs::remove_dir_all(&dir).ok(); + } + + // ── prompt_conflict_resolution ──────────────────────────────────────── + + #[test] + fn test_prompt_overwrite() { + let mut reader = std::io::Cursor::new("o\n"); + let res = prompt_conflict_resolution(Path::new("/tmp/ci.yml"), "", &mut reader); + assert_eq!(res, ConflictResolution::Overwrite); + } + + #[test] + fn test_prompt_merge() { + let mut reader = std::io::Cursor::new("m\n"); + let res = prompt_conflict_resolution(Path::new("/tmp/ci.yml"), "", &mut reader); + assert_eq!(res, ConflictResolution::Merge); + } + + #[test] + fn test_prompt_skip() { + let mut reader = std::io::Cursor::new("s\n"); + let res = prompt_conflict_resolution(Path::new("/tmp/ci.yml"), "", &mut reader); + assert_eq!(res, ConflictResolution::Skip); + } + + #[test] + fn test_prompt_unrecognised_defaults_to_skip() { + let mut reader = std::io::Cursor::new("x\n"); + let res = prompt_conflict_resolution(Path::new("/tmp/ci.yml"), "", &mut reader); + assert_eq!(res, ConflictResolution::Skip); + } + + // ── write_ci_files_interactive ──────────────────────────────────────── + + #[test] + fn test_interactive_overwrite_resolves_conflict() { + let dir = tmp_dir("interactive_ow"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content.clone(), CiFormat::GithubActions)]; + // Simulate user typing "o" at the prompt + let mut reader = std::io::Cursor::new("o\n"); + let summary = write_ci_files_interactive(&files2, &dir, &mut reader).unwrap(); + assert_eq!(summary.overwritten(), 1); + let written = std::fs::read_to_string(dir.join(".github/workflows/ci.yml")).unwrap(); + assert_eq!(written, new_content); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn 
test_interactive_merge_writes_conflict_markers() { + let dir = tmp_dir("interactive_merge"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content, CiFormat::GithubActions)]; + // Simulate user typing "m" at the prompt + let mut reader = std::io::Cursor::new("m\n"); + let summary = write_ci_files_interactive(&files2, &dir, &mut reader).unwrap(); + assert_eq!(summary.merged(), 1); + let written = std::fs::read_to_string(dir.join(".github/workflows/ci.yml")).unwrap(); + assert!(written.contains("<<<<<<< current")); + assert!(written.contains(">>>>>>> generated")); + std::fs::remove_dir_all(&dir).ok(); + } + + #[test] + fn test_interactive_skip_leaves_existing_file() { + let dir = tmp_dir("interactive_skip"); + let files = vec![CiFile::pipeline(VALID_YAML.to_string(), CiFormat::GithubActions)]; + write_ci_files(&files, &dir, false).unwrap(); + let new_content = VALID_YAML.replace("CI", "CI-MODIFIED"); + let files2 = vec![CiFile::pipeline(new_content, CiFormat::GithubActions)]; + let mut reader = std::io::Cursor::new("s\n"); + let summary = write_ci_files_interactive(&files2, &dir, &mut reader).unwrap(); + assert_eq!(summary.skipped(), 1); + let written = std::fs::read_to_string(dir.join(".github/workflows/ci.yml")).unwrap(); + // Original content must be intact + assert_eq!(written, VALID_YAML); + std::fs::remove_dir_all(&dir).ok(); + } +} diff --git a/src/generator/mod.rs b/src/generator/mod.rs index 56f8ea9b..24e2ee18 100644 --- a/src/generator/mod.rs +++ b/src/generator/mod.rs @@ -1,6 +1,8 @@ use crate::analyzer::ProjectAnalysis; use crate::error::Result; +pub mod cd_generation; +pub mod ci_generation; pub mod compose_gen; pub mod dockerfile_gen; pub mod templates; diff --git a/src/handlers/generate.rs b/src/handlers/generate.rs index 2a18268c..a4d0c74c 100644 --- 
a/src/handlers/generate.rs
+++ b/src/handlers/generate.rs
@@ -524,3 +524,405 @@ fn count_severities_helmlint(
     }
     (e, w, i)
 }
+
+/// CI-01: entry-point stub for `sync-ctl generate ci`.
+///
+/// Collects project context, assembles a `CiPipeline`, renders it to YAML,
+/// and either prints it (dry-run) or writes it to disk.
+pub fn handle_generate_ci(
+    path: std::path::PathBuf,
+    platform: crate::cli::CiPlatform,
+    format: Option<crate::cli::CiFormat>,
+    dry_run: bool,
+    output: Option<std::path::PathBuf>,
+    env_prefix: Option<String>,
+    skip_docker: bool,
+    notify: bool,
+) -> crate::Result<()> {
+    use crate::cli::{CiFormat, CiPlatform};
+    use crate::generator::ci_generation::{
+        context::collect_ci_context,
+        dry_run::print_dry_run,
+        notify_step::{render_notify_yaml, NotifyStep},
+        pipeline::build_ci_pipeline,
+        secrets_doc::generate_secrets_doc,
+        templates,
+        token_resolver::resolve_tokens,
+        writer::{write_ci_files, CiFile},
+    };
+
+    // Resolve effective format from CLI choice or platform default.
+    let effective_format = format.unwrap_or(match platform {
+        CiPlatform::Azure => CiFormat::AzurePipelines,
+        CiPlatform::Gcp => CiFormat::CloudBuild,
+        CiPlatform::Hetzner => CiFormat::GithubActions,
+    });
+
+    // ── Context collection ────────────────────────────────────────────────
+    let mut ctx = collect_ci_context(&path, platform, effective_format.clone())?;
+    if let Some(prefix) = env_prefix {
+        ctx.env_prefix = Some(prefix);
+    }
+
+    // ── Pipeline assembly ─────────────────────────────────────────────────
+    let mut pipeline = build_ci_pipeline(&ctx, skip_docker);
+
+    // ── Token resolution (two-pass) ───────────────────────────────────────
+    resolve_tokens(&ctx, &mut pipeline);
+
+    // ── YAML rendering ────────────────────────────────────────────────────
+    let pipeline_yaml = match effective_format {
+        CiFormat::GithubActions => templates::github_actions::render(&pipeline),
+        CiFormat::AzurePipelines => templates::azure_pipelines::render(&pipeline),
+        CiFormat::CloudBuild =>
templates::cloud_build::render(&pipeline), + }; + + // Append notify step snippet when requested (CI-24). + let notify_snippet = if notify { + render_notify_yaml(&NotifyStep::default()) + } else { + String::new() + }; + let full_pipeline_yaml = format!("{}{}", pipeline_yaml, notify_snippet); + + // ── Secrets documentation ───────────────────────────────────────────── + let secrets_content = + generate_secrets_doc(&full_pipeline_yaml, ctx.platform.clone(), effective_format.clone()); + + // ── Dry-run or write ────────────────────────────────────────────────── + let output_dir = output.unwrap_or_else(|| path.clone()); + + let files = vec![ + CiFile::pipeline(full_pipeline_yaml, effective_format.clone()), + CiFile::secrets_doc(secrets_content), + ]; + + if dry_run { + print_dry_run(&files, &pipeline, &output_dir); + } else { + let summary = write_ci_files(&files, &output_dir, false)?; + println!( + "āœ… CI pipeline generated — {} created, {} skipped", + summary.created() + summary.overwritten(), + summary.skipped(), + ); + if summary.invalid() > 0 { + eprintln!("āš ļø {} file(s) had invalid YAML and were not written.", summary.invalid()); + } + } + + // ── Telemetry (CI-27) ───────────────────────────────────────────────── + if let Some(client) = crate::telemetry::get_telemetry_client() { + use serde_json::json; + let total = pipeline.unresolved_tokens.len() + + pipeline + .triggers + .push_branches + .len(); // non-zero field just to avoid div-by-zero + let resolved_count = { + // Estimate: each resolved token reduces the placeholder count. + // unresolved_tokens holds only those that remain after resolution. + let placeholder_count = pipeline.unresolved_tokens.len(); + // A rough 5-token baseline (RUNTIME_VERSION, TEST_COMMAND, BUILD_COMMAND, + // REGISTRY_URL, IMAGE_NAME) for the resolution rate denominator. 
+            let baseline = 5usize;
+            let rate = if baseline > 0 {
+                let resolved = baseline.saturating_sub(placeholder_count);
+                (resolved as f64 / baseline as f64 * 100.0).round() as u64
+            } else {
+                100
+            };
+            rate
+        };
+        let _ = total; // suppress unused warning
+
+        let mut props = std::collections::HashMap::new();
+        props.insert("platform".to_string(), json!(format!("{:?}", ctx.platform)));
+        props.insert("format".to_string(), json!(format!("{:?}", effective_format)));
+        props.insert("language".to_string(), json!(ctx.primary_language));
+        props.insert("has_docker".to_string(), json!(ctx.has_dockerfile));
+        props.insert("monorepo".to_string(), json!(ctx.monorepo));
+        props.insert("token_resolution_rate".to_string(), json!(resolved_count));
+        client.track_event("generate_ci", props);
+    }
+
+    Ok(())
+}
+
+/// Collects project context, assembles a `CdPipeline`, renders it to YAML,
+/// and either prints it (dry-run) or writes it to disk.
+pub fn handle_generate_cd(
+    path: std::path::PathBuf,
+    platform: crate::cli::CdPlatform,
+    target: Option<crate::cli::CdTarget>,
+    registry: Option<crate::cli::CdRegistry>,
+    image_name: Option<String>,
+    dry_run: bool,
+    output: Option<std::path::PathBuf>,
+    force: bool,
+) -> crate::Result<()> {
+    use crate::generator::cd_generation::{
+        context::{
+            self, CdPlatform as CtxPlatform, DeployTarget, Registry,
+        },
+        pipeline::build_cd_pipeline,
+        templates,
+        token_resolver::resolve_tokens,
+        writer::{print_cd_dry_run, write_cd_files, CdFile},
+    };
+
+    // ── Map CLI enums to context enums ────────────────────────────────────
+    let ctx_platform = match platform {
+        crate::cli::CdPlatform::Azure => CtxPlatform::Azure,
+        crate::cli::CdPlatform::Gcp => CtxPlatform::Gcp,
+        crate::cli::CdPlatform::Hetzner => CtxPlatform::Hetzner,
+    };
+
+    let ctx_target = target.map(|t| match t {
+        crate::cli::CdTarget::AppService => DeployTarget::AppService,
+        crate::cli::CdTarget::Aks => DeployTarget::Aks,
+        crate::cli::CdTarget::ContainerApps => DeployTarget::ContainerApps,
+        crate::cli::CdTarget::CloudRun =>
DeployTarget::CloudRun, + crate::cli::CdTarget::Gke => DeployTarget::Gke, + crate::cli::CdTarget::Vps => DeployTarget::Vps, + crate::cli::CdTarget::HetznerK8s => DeployTarget::HetznerK8s, + crate::cli::CdTarget::Coolify => DeployTarget::Coolify, + }); + + let ctx_registry = registry.map(|r| match r { + crate::cli::CdRegistry::Acr => Registry::Acr, + crate::cli::CdRegistry::Gar => Registry::Gar, + crate::cli::CdRegistry::Ghcr => Registry::Ghcr, + }); + + // ── Context collection ──────────────────────────────────────────────── + let ctx = context::collect_cd_context( + &path, + ctx_platform.clone(), + ctx_target, + None, // environments: use defaults + ctx_registry, + image_name, + )?; + + // ── Pipeline assembly ───────────────────────────────────────────────── + let mut pipeline = build_cd_pipeline(&ctx); + + // ── Token resolution (two-pass) ─────────────────────────────────────── + resolve_tokens(&ctx, &mut pipeline); + + // ── YAML rendering ──────────────────────────────────────────────────── + let pipeline_yaml = match ctx_platform { + CtxPlatform::Azure => templates::azure::render(&pipeline), + CtxPlatform::Gcp => templates::gcp::render(&pipeline), + CtxPlatform::Hetzner => templates::hetzner::render(&pipeline), + }; + + // ── Manifest content ────────────────────────────────────────────────── + let manifest_content = toml::to_string_pretty(&pipeline).unwrap_or_default(); + + // ── Dry-run or write ────────────────────────────────────────────────── + let output_dir = output.unwrap_or_else(|| path.clone()); + + let files = vec![ + CdFile::pipeline(pipeline_yaml, ctx_platform.clone()), + CdFile::manifest(manifest_content), + ]; + + if dry_run { + print_cd_dry_run(&files); + } else { + let summary = write_cd_files(&files, &output_dir, force)?; + println!( + "āœ… CD pipeline generated — {} created, {} overwritten, {} skipped", + summary.created(), summary.overwritten(), summary.skipped(), + ); + } + + // ── Telemetry 
───────────────────────────────────────────────────────── + if let Some(client) = crate::telemetry::get_telemetry_client() { + use serde_json::json; + let mut props = std::collections::HashMap::new(); + props.insert("platform".to_string(), json!(format!("{:?}", ctx_platform))); + props.insert( + "deploy_target".to_string(), + json!(format!("{}", ctx.deploy_target)), + ); + props.insert("registry".to_string(), json!(format!("{}", ctx.registry))); + props.insert("has_docker".to_string(), json!(ctx.has_dockerfile)); + props.insert("has_migration".to_string(), json!(ctx.migration_tool.is_some())); + client.track_event("generate_cd", props); + } + + Ok(()) +} + +/// Combined CI + CD generation (CD-23). +/// +/// Runs both generators from a single `ProjectAnalysis`, cross-links the +/// `IMAGE_TAG` environment variable between CI and CD manifests, and produces +/// a merged `SECRETS_REQUIRED.md`. +pub fn handle_generate_cicd( + path: std::path::PathBuf, + platform: crate::cli::CdPlatform, + ci_format: Option, + target: Option, + registry: Option, + image_name: Option, + dry_run: bool, + output: Option, + force: bool, + notify: bool, +) -> crate::Result<()> { + use crate::cli::{CiFormat, CiPlatform}; + use crate::generator::cd_generation::{ + context::{ + self as cd_ctx, CdPlatform as CtxCdPlatform, DeployTarget, Registry, + }, + pipeline::build_cd_pipeline, + secrets_doc as cd_secrets_doc, + templates as cd_templates, + token_resolver::resolve_tokens as resolve_cd_tokens, + writer::{print_cd_dry_run, write_cd_files, CdFile}, + }; + use crate::generator::ci_generation::{ + context::collect_ci_context, + dry_run::print_dry_run as print_ci_dry_run, + notify_step::{render_notify_yaml, NotifyStep}, + pipeline::build_ci_pipeline, + secrets_doc::generate_secrets_doc as generate_ci_secrets_doc, + templates as ci_templates, + token_resolver::resolve_tokens as resolve_ci_tokens, + writer::{write_ci_files, CiFile}, + }; + + println!("šŸš€ Generating CI + CD pipelines for {}", 
path.display()); + + // ── Map CdPlatform → CiPlatform ────────────────────────────────────── + let ci_platform = match platform { + crate::cli::CdPlatform::Azure => CiPlatform::Azure, + crate::cli::CdPlatform::Gcp => CiPlatform::Gcp, + crate::cli::CdPlatform::Hetzner => CiPlatform::Hetzner, + }; + + let effective_ci_format = ci_format.unwrap_or(match ci_platform { + CiPlatform::Azure => CiFormat::AzurePipelines, + CiPlatform::Gcp => CiFormat::CloudBuild, + CiPlatform::Hetzner => CiFormat::GithubActions, + }); + + // ── Map CLI CD enums ───────────────────────────────────────────────── + let ctx_cd_platform = match platform { + crate::cli::CdPlatform::Azure => CtxCdPlatform::Azure, + crate::cli::CdPlatform::Gcp => CtxCdPlatform::Gcp, + crate::cli::CdPlatform::Hetzner => CtxCdPlatform::Hetzner, + }; + + let ctx_target = target.map(|t| match t { + crate::cli::CdTarget::AppService => DeployTarget::AppService, + crate::cli::CdTarget::Aks => DeployTarget::Aks, + crate::cli::CdTarget::ContainerApps => DeployTarget::ContainerApps, + crate::cli::CdTarget::CloudRun => DeployTarget::CloudRun, + crate::cli::CdTarget::Gke => DeployTarget::Gke, + crate::cli::CdTarget::Vps => DeployTarget::Vps, + crate::cli::CdTarget::HetznerK8s => DeployTarget::HetznerK8s, + crate::cli::CdTarget::Coolify => DeployTarget::Coolify, + }); + + let ctx_registry = registry.map(|r| match r { + crate::cli::CdRegistry::Acr => Registry::Acr, + crate::cli::CdRegistry::Gar => Registry::Gar, + crate::cli::CdRegistry::Ghcr => Registry::Ghcr, + }); + + // ── 1. 
CI generation ───────────────────────────────────────────────── + let ci_ctx = collect_ci_context(&path, ci_platform, effective_ci_format.clone())?; + let mut ci_pipeline = build_ci_pipeline(&ci_ctx, false); + resolve_ci_tokens(&ci_ctx, &mut ci_pipeline); + + let ci_yaml = match effective_ci_format { + CiFormat::GithubActions => ci_templates::github_actions::render(&ci_pipeline), + CiFormat::AzurePipelines => ci_templates::azure_pipelines::render(&ci_pipeline), + CiFormat::CloudBuild => ci_templates::cloud_build::render(&ci_pipeline), + }; + + let notify_snippet = if notify { + render_notify_yaml(&NotifyStep::default()) + } else { + String::new() + }; + let full_ci_yaml = format!("{ci_yaml}{notify_snippet}"); + + // ── 2. CD generation ───────────────────────────────────────────────── + let cd_ctx = cd_ctx::collect_cd_context( + &path, + ctx_cd_platform.clone(), + ctx_target, + None, + ctx_registry, + image_name, + )?; + let mut cd_pipeline = build_cd_pipeline(&cd_ctx); + resolve_cd_tokens(&cd_ctx, &mut cd_pipeline); + + let cd_yaml = match ctx_cd_platform { + CtxCdPlatform::Azure => cd_templates::azure::render(&cd_pipeline), + CtxCdPlatform::Gcp => cd_templates::gcp::render(&cd_pipeline), + CtxCdPlatform::Hetzner => cd_templates::hetzner::render(&cd_pipeline), + }; + + // ── 3. Cross-linked secrets doc ────────────────────────────────────── + let ci_secrets_md = + generate_ci_secrets_doc(&full_ci_yaml, ci_platform, effective_ci_format.clone()); + let cd_secrets_md = cd_secrets_doc::generate_cd_secrets_doc(&cd_yaml, &ctx_cd_platform); + let merged_secrets = format!( + "# Required Secrets & Variables\n\n\ + > Auto-generated by `sync-ctl generate ci-cd`.\n\ + > Both CI and CD secrets are listed below, deduplicated.\n\n\ + ## CI Pipeline Secrets\n\n{ci_secrets_md}\n\n\ + ---\n\n\ + ## CD Pipeline Secrets\n\n{cd_secrets_md}\n" + ); + + // ── 4. 
Manifest content ────────────────────────────────────────────── + let cd_manifest = toml::to_string_pretty(&cd_pipeline).unwrap_or_default(); + + // ── 5. Output ──────────────────────────────────────────────────────── + let output_dir = output.unwrap_or_else(|| path.clone()); + + let ci_files = vec![ + CiFile::pipeline(full_ci_yaml, effective_ci_format.clone()), + CiFile::secrets_doc(merged_secrets), + ]; + + let cd_files = vec![ + CdFile::pipeline(cd_yaml, ctx_cd_platform.clone()), + CdFile::manifest(cd_manifest), + ]; + + if dry_run { + println!("\n── CI Pipeline ────────────────────────────────────"); + print_ci_dry_run(&ci_files, &ci_pipeline, &output_dir); + println!("\n── CD Pipeline ────────────────────────────────────"); + print_cd_dry_run(&cd_files); + } else { + let ci_summary = write_ci_files(&ci_files, &output_dir, force)?; + let cd_summary = write_cd_files(&cd_files, &output_dir, force)?; + println!( + "āœ… CI + CD pipelines generated — CI: {} created, CD: {} created", + ci_summary.created() + ci_summary.overwritten(), + cd_summary.created() + cd_summary.overwritten(), + ); + } + + // ── Telemetry ──────────────────────────────────────────────────────── + if let Some(client) = crate::telemetry::get_telemetry_client() { + use serde_json::json; + let mut props = std::collections::HashMap::new(); + props.insert("platform".to_string(), json!(format!("{:?}", platform))); + props.insert("mode".to_string(), json!("ci-cd")); + client.track_event("generate_cicd", props); + } + + Ok(()) +} diff --git a/src/handlers/mod.rs b/src/handlers/mod.rs index 347d9db4..cbc800f8 100644 --- a/src/handlers/mod.rs +++ b/src/handlers/mod.rs @@ -12,7 +12,7 @@ pub mod vulnerabilities; // Re-export all handler functions pub use analyze::handle_analyze; pub use dependencies::handle_dependencies; -pub use generate::{handle_generate, handle_validate}; +pub use generate::{handle_generate, handle_generate_cd, handle_generate_ci, handle_generate_cicd, handle_validate}; pub use 
optimize::{OptimizeOptions, handle_optimize}; pub use security::handle_security; pub use tools::handle_tools; diff --git a/src/lib.rs b/src/lib.rs index db7a5cba..a97ddca4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -44,18 +44,59 @@ pub async fn run_command( Err(e) => Err(e), } } - Commands::Generate { - path, - output, - dockerfile, - compose, - terraform, - all, - dry_run, - force, - } => handlers::handle_generate( - path, output, dockerfile, compose, terraform, all, dry_run, force, - ), + Commands::Generate { command } => match command { + cli::GenerateCommand::Iac { + path, + output, + dockerfile, + compose, + terraform, + all, + dry_run, + force, + } => handlers::handle_generate( + path, output, dockerfile, compose, terraform, all, dry_run, force, + ), + cli::GenerateCommand::Ci { + path, + platform, + format, + dry_run, + output, + env_prefix, + skip_docker, + notify, + } => handlers::handle_generate_ci( + path, platform, format, dry_run, output, env_prefix, skip_docker, notify, + ), + cli::GenerateCommand::Cd { + path, + platform, + target, + registry, + image_name, + dry_run, + output, + force, + } => handlers::handle_generate_cd( + path, platform, target, registry, image_name, dry_run, output, force, + ), + cli::GenerateCommand::CiCd { + path, + platform, + ci_format, + target, + registry, + image_name, + dry_run, + output, + force, + notify, + } => handlers::handle_generate_cicd( + path, platform, ci_format, target, registry, image_name, dry_run, output, force, + notify, + ), + }, Commands::Validate { path, types, diff --git a/src/main.rs b/src/main.rs index 58837589..c1381b7e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,10 +2,10 @@ use clap::Parser; use syncable_cli::{ analyzer::{self, analyze_monorepo, vulnerability::VulnerabilitySeverity}, cli::{ - ChatProvider, Cli, ColorScheme, Commands, DisplayFormat, EnvCommand, OutputFormat, - SecurityScanMode, SeverityThreshold, ToolsCommand, + ChatProvider, Cli, ColorScheme, Commands, DisplayFormat, 
EnvCommand, + GenerateCommand, OutputFormat, SecurityScanMode, SeverityThreshold, ToolsCommand, }, - config, generator, + config, generator, handle_generate_cd, handle_generate_ci, handle_generate_cicd, telemetry::{self}, }; @@ -216,56 +216,137 @@ async fn run() -> syncable_cli::Result<()> { } } } - Commands::Generate { - path, - output, - dockerfile, - compose, - terraform, - all, - dry_run, - force, - } => { - // Create telemetry properties - let mut properties = HashMap::new(); + Commands::Generate { command } => match command { + GenerateCommand::Iac { + path, + output, + dockerfile, + compose, + terraform, + all, + dry_run, + force, + } => { + // Create telemetry properties + let mut properties = HashMap::new(); - if dockerfile { - properties.insert("generate_dockerfile".to_string(), json!(true)); - } + if dockerfile { + properties.insert("generate_dockerfile".to_string(), json!(true)); + } - if compose { - properties.insert("generate_compose".to_string(), json!(true)); - } + if compose { + properties.insert("generate_compose".to_string(), json!(true)); + } - if terraform { - properties.insert("generate_terraform".to_string(), json!(true)); - } + if terraform { + properties.insert("generate_terraform".to_string(), json!(true)); + } - if all { - properties.insert("generate_all".to_string(), json!(true)); - } + if all { + properties.insert("generate_all".to_string(), json!(true)); + } - if dry_run { - properties.insert("dry_run".to_string(), json!(true)); - } + if dry_run { + properties.insert("dry_run".to_string(), json!(true)); + } - if force { - properties.insert("force_overwrite".to_string(), json!(true)); - } + if force { + properties.insert("force_overwrite".to_string(), json!(true)); + } - if output.is_some() { - properties.insert("custom_output_dir".to_string(), json!(true)); - } + if output.is_some() { + properties.insert("custom_output_dir".to_string(), json!(true)); + } - // Track Generate command with properties - if let Some(telemetry_client) = 
telemetry::get_telemetry_client() { - telemetry_client.track_generate(properties); - } + // Track Generate command with properties + if let Some(telemetry_client) = telemetry::get_telemetry_client() { + telemetry_client.track_generate(properties); + } - handle_generate( - path, output, dockerfile, compose, terraform, all, dry_run, force, - ) - } + handle_generate( + path, output, dockerfile, compose, terraform, all, dry_run, force, + ) + } + GenerateCommand::Ci { + path, + platform, + format, + dry_run, + output, + env_prefix, + skip_docker, + notify, + } => { + let mut properties = HashMap::new(); + properties.insert( + "ci_platform".to_string(), + json!(format!("{:?}", platform).to_lowercase()), + ); + if dry_run { + properties.insert("dry_run".to_string(), json!(true)); + } + if let Some(telemetry_client) = telemetry::get_telemetry_client() { + telemetry_client.track_generate(properties); + } + let notify_enabled = notify || config.generation.notify; + handle_generate_ci(path, platform, format, dry_run, output, env_prefix, skip_docker, notify_enabled) + } + GenerateCommand::Cd { + path, + platform, + target, + registry, + image_name, + dry_run, + output, + force, + } => { + let mut properties = HashMap::new(); + properties.insert( + "cd_platform".to_string(), + json!(format!("{:?}", platform).to_lowercase()), + ); + if let Some(ref t) = target { + properties.insert("cd_target".to_string(), json!(format!("{:?}", t).to_lowercase())); + } + if dry_run { + properties.insert("dry_run".to_string(), json!(true)); + } + if let Some(telemetry_client) = telemetry::get_telemetry_client() { + telemetry_client.track_generate(properties); + } + handle_generate_cd(path, platform, target, registry, image_name, dry_run, output, force) + } + GenerateCommand::CiCd { + path, + platform, + ci_format, + target, + registry, + image_name, + dry_run, + output, + force, + notify, + } => { + let mut properties = HashMap::new(); + properties.insert( + "cd_platform".to_string(), + 
json!(format!("{:?}", platform).to_lowercase()), + ); + properties.insert("combined_cicd".to_string(), json!(true)); + if let Some(ref t) = target { + properties.insert("cd_target".to_string(), json!(format!("{:?}", t).to_lowercase())); + } + if dry_run { + properties.insert("dry_run".to_string(), json!(true)); + } + if let Some(telemetry_client) = telemetry::get_telemetry_client() { + telemetry_client.track_generate(properties); + } + handle_generate_cicd(path, platform, ci_format, target, registry, image_name, dry_run, output, force, notify) + } + }, Commands::Validate { path, diff --git a/tests/cd_generator_integration.rs b/tests/cd_generator_integration.rs new file mode 100644 index 00000000..f6c9b69b --- /dev/null +++ b/tests/cd_generator_integration.rs @@ -0,0 +1,431 @@ +//! CD-26 — End-to-end integration tests for the CD generation subsystem. +//! +//! Tests the full pipeline: context collection → pipeline build → token +//! resolution → template rendering → YAML output validation. +//! +//! Also exercises `collect_cd_context` against language fixture directories +//! and verifies secrets-doc generation, config loading, and the combined +//! CI+CD workflow generation path. + +use std::path::PathBuf; + +use syncable_cli::generator::cd_generation::{ + context::{CdPlatform, DeployTarget, collect_cd_context}, + pipeline::build_cd_pipeline, + secrets_doc::generate_cd_secrets_doc, + templates, + token_resolver::resolve_tokens, +}; + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Returns the absolute path to a CI language fixture directory. +fn fixture(lang: &str) -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("ci") + .join(lang) +} + +/// Asserts YAML string contains no patterns that look like real credentials. 
+fn assert_no_hardcoded_secrets(yaml: &str) { + assert!( + !yaml + .split_whitespace() + .any(|w| w.starts_with("ghp_") && w.len() > 10), + "output contains a GitHub token pattern" + ); + assert!( + !yaml.split_whitespace().any(|w| { + w.starts_with("AKIA") + && w.len() == 20 + && w[4..].chars().all(|c| c.is_ascii_uppercase() || c.is_ascii_digit()) + }), + "output contains an AWS access key pattern" + ); + assert!( + !yaml + .split_whitespace() + .any(|w| w.starts_with("sk-") && w.len() > 20), + "output contains an API secret key pattern" + ); +} + +/// Run the full pipeline (context → build → resolve → render) and return YAML. +fn render_full_pipeline(platform: CdPlatform, target: DeployTarget) -> String { + let tmp = tempfile::TempDir::new().unwrap(); + let ctx = collect_cd_context(tmp.path(), platform.clone(), Some(target), None, None, None) + .expect("context collection should succeed"); + let mut pipeline = build_cd_pipeline(&ctx); + resolve_tokens(&ctx, &mut pipeline); + match platform { + CdPlatform::Azure => templates::azure::render(&pipeline), + CdPlatform::Gcp => templates::gcp::render(&pipeline), + CdPlatform::Hetzner => templates::hetzner::render(&pipeline), + } +} + +// ── Full pipeline rendering: Azure ──────────────────────────────────────────── + +#[test] +fn azure_app_service_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn azure_app_service_yaml_has_required_sections() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn 
azure_aks_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::Aks); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); +} + +#[test] +fn azure_container_apps_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::ContainerApps); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); +} + +#[test] +fn azure_yaml_has_no_hardcoded_secrets() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + assert_no_hardcoded_secrets(&yaml); +} + +#[test] +fn azure_yaml_contains_login_action() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + assert!( + yaml.contains("azure/login@v2"), + "Azure pipeline must include azure/login action" + ); +} + +// ── Full pipeline rendering: GCP ────────────────────────────────────────────── + +#[test] +fn gcp_cloud_run_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::CloudRun); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn gcp_cloud_run_yaml_has_required_sections() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::CloudRun); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn gcp_gke_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::Gke); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); +} + +#[test] +fn gcp_yaml_has_no_hardcoded_secrets() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::CloudRun); + 
assert_no_hardcoded_secrets(&yaml); +} + +#[test] +fn gcp_yaml_contains_auth_action() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::CloudRun); + assert!( + yaml.contains("google-github-actions/auth@v2"), + "GCP pipeline must include google-github-actions/auth" + ); +} + +// ── Full pipeline rendering: Hetzner ────────────────────────────────────────── + +#[test] +fn hetzner_vps_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Vps); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn hetzner_vps_yaml_has_required_sections() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Vps); + assert!(yaml.contains("name:"), "missing workflow name"); + assert!(yaml.contains("on:"), "missing trigger section"); + assert!(yaml.contains("jobs:"), "missing jobs section"); +} + +#[test] +fn hetzner_k8s_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::HetznerK8s); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:")); + assert!(yaml.contains("jobs:")); +} + +#[test] +fn hetzner_coolify_full_pipeline_has_structure() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Coolify); + assert!(!yaml.is_empty()); + assert!(yaml.contains("name:")); +} + +#[test] +fn hetzner_yaml_has_no_hardcoded_secrets() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Vps); + assert_no_hardcoded_secrets(&yaml); +} + +#[test] +fn hetzner_yaml_contains_ssh_reference() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Vps); + assert!( + yaml.contains("ssh") || yaml.contains("SSH"), + "Hetzner VPS pipeline must reference SSH" + ); +} + +// ── Secrets doc generation ──────────────────────────────────────────────────── + +#[test] 
+fn secrets_doc_for_azure_yaml_contains_credentials() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + let doc = generate_cd_secrets_doc(&yaml, &CdPlatform::Azure); + assert!( + doc.contains("AZURE") || doc.contains("azure"), + "Azure secrets doc should mention Azure" + ); +} + +#[test] +fn secrets_doc_for_gcp_yaml_mentions_gcp() { + let yaml = render_full_pipeline(CdPlatform::Gcp, DeployTarget::CloudRun); + let doc = generate_cd_secrets_doc(&yaml, &CdPlatform::Gcp); + assert!( + doc.contains("GCP") || doc.contains("gcp") || doc.contains("Google"), + "GCP secrets doc should mention GCP/Google" + ); +} + +#[test] +fn secrets_doc_for_hetzner_includes_prerequisites() { + let yaml = render_full_pipeline(CdPlatform::Hetzner, DeployTarget::Vps); + let doc = generate_cd_secrets_doc(&yaml, &CdPlatform::Hetzner); + // Hetzner secrets doc always appends prerequisites checklist + assert!( + doc.contains("Prerequisite") || doc.contains("prerequisite") || doc.contains("Firewall") || doc.contains("Docker"), + "Hetzner secrets doc should include prerequisites checklist" + ); +} + +#[test] +fn secrets_doc_is_markdown_formatted() { + let yaml = render_full_pipeline(CdPlatform::Azure, DeployTarget::AppService); + let doc = generate_cd_secrets_doc(&yaml, &CdPlatform::Azure); + // Should contain markdown table separators or section headers + assert!( + doc.contains("| ") || doc.contains("# ") || doc.contains("## "), + "Secrets doc should be Markdown formatted" + ); +} + +// ── Context collection from language fixtures ───────────────────────────────── + +#[test] +fn collect_cd_context_succeeds_for_node_fixture() { + let ctx = + collect_cd_context(&fixture("node"), CdPlatform::Azure, None, None, None, None) + .expect("should collect CD context from Node.js fixture"); + assert_eq!(ctx.platform, CdPlatform::Azure); + assert!(!ctx.project_name.is_empty(), "project name should be detected"); +} + +#[test] +fn 
collect_cd_context_succeeds_for_python_fixture() { + let ctx = + collect_cd_context(&fixture("python"), CdPlatform::Gcp, None, None, None, None) + .expect("should collect CD context from Python fixture"); + assert_eq!(ctx.platform, CdPlatform::Gcp); +} + +#[test] +fn collect_cd_context_succeeds_for_rust_fixture() { + let ctx = collect_cd_context( + &fixture("rust"), + CdPlatform::Hetzner, + None, + None, + None, + None, + ) + .expect("should collect CD context from Rust fixture"); + assert_eq!(ctx.platform, CdPlatform::Hetzner); +} + +#[test] +fn collect_cd_context_succeeds_for_go_fixture() { + let ctx = collect_cd_context(&fixture("go"), CdPlatform::Azure, None, None, None, None) + .expect("should collect CD context from Go fixture"); + assert_eq!(ctx.platform, CdPlatform::Azure); +} + +#[test] +fn collect_cd_context_succeeds_for_java_fixture() { + let ctx = collect_cd_context( + &fixture("java"), + CdPlatform::Gcp, + Some(DeployTarget::CloudRun), + None, + None, + None, + ) + .expect("should collect CD context from Java fixture"); + assert_eq!(ctx.platform, CdPlatform::Gcp); + assert_eq!(ctx.deploy_target, DeployTarget::CloudRun); +} + +// ── Config file loading ─────────────────────────────────────────────────────── + +#[test] +fn cd_config_loads_from_syncable_cd_toml() { + use syncable_cli::generator::cd_generation::cd_config::load_cd_config; + + let tmp = tempfile::TempDir::new().unwrap(); + std::fs::write( + tmp.path().join(".syncable.cd.toml"), + r#" +platform = "azure" +target = "app-service" +registry = "acr" +image_name = "my-integration-app" +health_check_path = "/healthz" +default_branch = "develop" +"#, + ) + .unwrap(); + + let config = load_cd_config(tmp.path()) + .expect("should load config") + .expect("config should exist"); + assert_eq!(config.platform.as_deref(), Some("azure")); + assert_eq!(config.image_name.as_deref(), Some("my-integration-app")); + assert_eq!(config.default_branch.as_deref(), Some("develop")); +} + +#[test] +fn 
cd_config_merges_into_context() { + use syncable_cli::generator::cd_generation::cd_config::{load_cd_config, merge_config_into_cd_context}; + + let tmp = tempfile::TempDir::new().unwrap(); + std::fs::write( + tmp.path().join(".syncable.cd.toml"), + r#" +image_name = "merged-app" +health_check_path = "/ready" +"#, + ) + .unwrap(); + + let mut ctx = + collect_cd_context(tmp.path(), CdPlatform::Azure, None, None, None, None).unwrap(); + let config = load_cd_config(tmp.path()).unwrap().unwrap(); + merge_config_into_cd_context(&config, &mut ctx); + + assert_eq!(ctx.image_name, "merged-app"); + assert_eq!(ctx.health_check_path.as_deref(), Some("/ready")); +} + +// ── Cross-platform consistency ──────────────────────────────────────────────── + +#[test] +fn all_platforms_produce_non_empty_yaml() { + let combos: Vec<(CdPlatform, DeployTarget)> = vec![ + (CdPlatform::Azure, DeployTarget::AppService), + (CdPlatform::Azure, DeployTarget::Aks), + (CdPlatform::Azure, DeployTarget::ContainerApps), + (CdPlatform::Gcp, DeployTarget::CloudRun), + (CdPlatform::Gcp, DeployTarget::Gke), + (CdPlatform::Hetzner, DeployTarget::Vps), + (CdPlatform::Hetzner, DeployTarget::HetznerK8s), + (CdPlatform::Hetzner, DeployTarget::Coolify), + ]; + + for (platform, target) in combos { + let yaml = render_full_pipeline(platform.clone(), target.clone()); + assert!( + !yaml.is_empty(), + "YAML should not be empty for {:?}/{:?}", + platform, + target + ); + assert!( + yaml.len() > 50, + "YAML is suspiciously short for {:?}/{:?}: {} bytes", + platform, + target, + yaml.len() + ); + } +} + +#[test] +fn all_platform_yamls_use_secrets_expressions() { + // All rendered YAML should reference secrets via ${{ secrets.* }} — never plain text + let combos = [ + (CdPlatform::Azure, DeployTarget::AppService), + (CdPlatform::Gcp, DeployTarget::CloudRun), + (CdPlatform::Hetzner, DeployTarget::Vps), + ]; + + for (platform, target) in &combos { + let yaml = render_full_pipeline(platform.clone(), target.clone()); + // 
Should use GitHub Actions secret expression syntax + if yaml.contains("secrets.") { + assert!( + yaml.contains("${{ secrets."), + "Secrets in {:?} YAML should use ${{{{ secrets.* }}}} syntax", + platform + ); + } + } +} + +#[test] +fn health_check_present_in_all_rendered_pipelines() { + let combos = [ + (CdPlatform::Azure, DeployTarget::AppService), + (CdPlatform::Gcp, DeployTarget::CloudRun), + (CdPlatform::Hetzner, DeployTarget::Vps), + ]; + + for (platform, target) in &combos { + let yaml = render_full_pipeline(platform.clone(), target.clone()); + assert!( + yaml.contains("health") || yaml.contains("Health") || yaml.contains("curl") || yaml.contains("/health"), + "Pipeline for {:?} should reference health check", + platform + ); + } +} diff --git a/tests/ci_generator_integration.rs b/tests/ci_generator_integration.rs new file mode 100644 index 00000000..3f54093c --- /dev/null +++ b/tests/ci_generator_integration.rs @@ -0,0 +1,258 @@ +//! CI-26 — End-to-end integration tests for the CI generation subsystem. +//! +//! Tests template rendering at the full-pipeline level — valid YAML output, +//! required structural fields, and absence of hardcoded secrets. +//! +//! Also exercises `collect_ci_context` against the language fixture projects +//! in `tests/fixtures/ci/` to verify that context collection succeeds and +//! produces the expected primary language for each ecosystem. +//! +//! # Note on CI-01 wiring +//! +//! The CLI handler `handle_generate_ci` currently returns a static skeleton +//! rather than invoking the full pipeline. These tests exercise the template +//! layer directly. A companion test asserting the full CLI binary output will +//! be added once CI-01 (final wiring) replaces the stub. 
+ +use std::path::PathBuf; + +use syncable_cli::cli::{CiFormat, CiPlatform}; +use syncable_cli::generator::ci_generation::{ + context::collect_ci_context, + schema::{ + CiPipeline, InstallStep, RuntimeStep, SecretScanStep, TestStep, TriggerConfig, + }, + templates, +}; + +// ── Shared helpers ──────────────────────────────────────────────────────────── + +fn minimal_pipeline(platform: CiPlatform, format: CiFormat) -> CiPipeline { + CiPipeline { + project_name: "integration-test-app".to_string(), + platform, + format, + triggers: TriggerConfig { + push_branches: vec!["main".to_string()], + pr_branches: vec!["main".to_string()], + tag_pattern: None, + scheduled: None, + }, + runtime: RuntimeStep { + action: "actions/setup-node@v4".to_string(), + version: "20".to_string(), + }, + cache: None, + install: InstallStep { command: "npm ci".to_string() }, + lint: None, + test: TestStep { + command: "npm test".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + build: None, + docker_build: None, + image_scan: None, + secret_scan: syncable_cli::generator::ci_generation::schema::SecretScanStep { + github_token_expr: "${{ secrets.GITHUB_TOKEN }}".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } +} + +/// Returns the absolute path to a CI language fixture directory. +fn fixture(lang: &str) -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("ci") + .join(lang) +} + +/// Asserts that `yaml` contains no string patterns that look like real +/// credential values (GitHub tokens, AWS keys, etc.). +fn assert_no_hardcoded_secrets(yaml: &str) { + // Real GitHub personal-access tokens start with "ghp_" followed by 36+ alphanum chars. + assert!( + !yaml.split_whitespace().any(|w| w.starts_with("ghp_") && w.len() > 10), + "output contains a GitHub token pattern: {yaml}" + ); + // Real AWS access key IDs start with "AKIA" followed by exactly 16 uppercase chars. 
+ assert!( + !yaml.split_whitespace().any(|w| { + w.starts_with("AKIA") + && w.len() == 20 + && w[4..].chars().all(|c| c.is_ascii_uppercase() || c.is_ascii_digit()) + }), + "output contains an AWS access key pattern: {yaml}" + ); +} + +// ── GitHub Actions end-to-end ───────────────────────────────────────────────── + +#[test] +fn github_actions_output_is_valid_yaml() { + let yaml = templates::github_actions::render(&minimal_pipeline( + CiPlatform::Hetzner, + CiFormat::GithubActions, + )); + serde_yaml::from_str::(&yaml) + .expect("GitHub Actions output must be valid YAML"); +} + +#[test] +fn github_actions_output_contains_checkout_step() { + let yaml = templates::github_actions::render(&minimal_pipeline( + CiPlatform::Hetzner, + CiFormat::GithubActions, + )); + assert!( + yaml.contains("actions/checkout"), + "GitHub Actions pipeline must include a checkout step" + ); +} + +#[test] +fn github_actions_output_contains_runtime_setup_step() { + let yaml = templates::github_actions::render(&minimal_pipeline( + CiPlatform::Hetzner, + CiFormat::GithubActions, + )); + // Runtime setup action was injected into the pipeline. 
+ assert!( + yaml.contains("setup-node"), + "pipeline must contain a runtime setup step" + ); +} + +#[test] +fn github_actions_output_contains_test_step() { + let yaml = templates::github_actions::render(&minimal_pipeline( + CiPlatform::Hetzner, + CiFormat::GithubActions, + )); + assert!(yaml.contains("npm test"), "pipeline must contain the test command"); +} + +#[test] +fn github_actions_output_has_no_hardcoded_secrets() { + let yaml = templates::github_actions::render(&minimal_pipeline( + CiPlatform::Hetzner, + CiFormat::GithubActions, + )); + assert_no_hardcoded_secrets(&yaml); +} + +// ── Azure Pipelines end-to-end ──────────────────────────────────────────────── + +#[test] +fn azure_pipelines_output_is_valid_yaml() { + let yaml = templates::azure_pipelines::render(&minimal_pipeline( + CiPlatform::Azure, + CiFormat::AzurePipelines, + )); + serde_yaml::from_str::(&yaml) + .expect("Azure Pipelines output must be valid YAML"); +} + +#[test] +fn azure_pipelines_output_contains_required_fields() { + let yaml = templates::azure_pipelines::render(&minimal_pipeline( + CiPlatform::Azure, + CiFormat::AzurePipelines, + )); + // Azure auto-checkouts; runtime setup and test step are required. 
+ assert!(yaml.contains("npm test"), "Azure pipeline must contain the test command"); + assert!( + yaml.contains("ubuntu") || yaml.contains("ubuntu-latest"), + "Azure pipeline must specify an agent VM image" + ); +} + +#[test] +fn azure_pipelines_output_has_no_hardcoded_secrets() { + let yaml = templates::azure_pipelines::render(&minimal_pipeline( + CiPlatform::Azure, + CiFormat::AzurePipelines, + )); + assert_no_hardcoded_secrets(&yaml); +} + +// ── Cloud Build end-to-end ──────────────────────────────────────────────────── + +#[test] +fn cloud_build_output_is_valid_yaml() { + let yaml = templates::cloud_build::render(&minimal_pipeline( + CiPlatform::Gcp, + CiFormat::CloudBuild, + )); + serde_yaml::from_str::(&yaml) + .expect("Cloud Build output must be valid YAML"); +} + +#[test] +fn cloud_build_output_contains_test_step() { + let yaml = templates::cloud_build::render(&minimal_pipeline( + CiPlatform::Gcp, + CiFormat::CloudBuild, + )); + assert!(yaml.contains("npm test"), "Cloud Build pipeline must contain the test command"); +} + +#[test] +fn cloud_build_output_has_no_hardcoded_secrets() { + let yaml = templates::cloud_build::render(&minimal_pipeline( + CiPlatform::Gcp, + CiFormat::CloudBuild, + )); + assert_no_hardcoded_secrets(&yaml); +} + +// ── CiContext collection from language fixtures ─────────────────────────────── + +#[test] +fn collect_ci_context_succeeds_for_node_fixture() { + let ctx = collect_ci_context(&fixture("node"), CiPlatform::Hetzner, CiFormat::GithubActions) + .expect("should collect context from Node.js fixture"); + assert_ne!( + ctx.primary_language.to_lowercase(), + "unknown", + "should detect a real language for Node.js fixture" + ); +} + +#[test] +fn collect_ci_context_succeeds_for_python_fixture() { + let ctx = + collect_ci_context(&fixture("python"), CiPlatform::Gcp, CiFormat::GithubActions) + .expect("should collect context from Python fixture"); + assert_ne!(ctx.primary_language.to_lowercase(), "unknown"); +} + +#[test] +fn 
collect_ci_context_succeeds_for_rust_fixture() { + let ctx = + collect_ci_context(&fixture("rust"), CiPlatform::Hetzner, CiFormat::GithubActions) + .expect("should collect context from Rust fixture"); + assert!( + ctx.primary_language.to_lowercase().contains("rust"), + "expected Rust primary language, got: {}", + ctx.primary_language + ); +} + +#[test] +fn collect_ci_context_succeeds_for_go_fixture() { + let ctx = collect_ci_context(&fixture("go"), CiPlatform::Gcp, CiFormat::GithubActions) + .expect("should collect context from Go fixture"); + assert_ne!(ctx.primary_language.to_lowercase(), "unknown"); +} + +#[test] +fn collect_ci_context_succeeds_for_java_fixture() { + let ctx = collect_ci_context(&fixture("java"), CiPlatform::Azure, CiFormat::AzurePipelines) + .expect("should collect context from Java fixture"); + assert_ne!(ctx.primary_language.to_lowercase(), "unknown"); +} diff --git a/tests/ci_generator_unit.rs b/tests/ci_generator_unit.rs new file mode 100644 index 00000000..0a339a39 --- /dev/null +++ b/tests/ci_generator_unit.rs @@ -0,0 +1,354 @@ +//! CI-25 — Unit tests for the CI generation subsystem. +//! +//! Exercises: token resolution, monorepo strategy generator, file writer +//! conflict detection, template rendering (all three platforms), coverage +//! step, notify step. Each test section maps to a spec bullet in CI-25. 
+ +use std::io::Cursor; +use std::path::PathBuf; + +use tempfile::TempDir; + +use syncable_cli::cli::{CiFormat, CiPlatform}; +use syncable_cli::generator::ci_generation::{ + coverage_step::{ + coverage_secrets_doc_entry, generate_coverage_step_for, render_coverage_yaml, + CoverageService, + }, + monorepo::generate_monorepo_strategy, + notify_step::{generate_notify_step, render_notify_yaml}, + schema::{ + CiPipeline, InstallStep, RuntimeStep, SecretScanStep, TestStep, TriggerConfig, + }, + templates, + test_helpers::make_base_ctx, + token_resolver::resolve_tokens, + writer::{write_ci_files, write_ci_files_interactive, CiFile, WriteOutcome}, +}; + +// ── Shared constructor ──────────────────────────────────────────────────────── + +/// Returns a fully-resolved minimal `CiPipeline` — no placeholder tokens. +fn minimal_pipeline() -> CiPipeline { + CiPipeline { + project_name: "my-service".to_string(), + platform: CiPlatform::Gcp, + format: CiFormat::GithubActions, + triggers: TriggerConfig { + push_branches: vec!["main".to_string()], + pr_branches: vec!["main".to_string()], + tag_pattern: None, + scheduled: None, + }, + runtime: RuntimeStep { + action: "actions/setup-node@v4".to_string(), + version: "20".to_string(), + }, + cache: None, + install: InstallStep { command: "npm ci".to_string() }, + lint: None, + test: TestStep { + command: "npm test".to_string(), + coverage_flag: None, + coverage_report_path: None, + }, + build: None, + docker_build: None, + image_scan: None, + secret_scan: SecretScanStep { + github_token_expr: "${{ secrets.GITHUB_TOKEN }}".to_string(), + gitleaks_license_secret: None, + }, + upload_artifact: None, + unresolved_tokens: vec![], + } +} + +// ── Token resolution ────────────────────────────────────────────────────────── + +#[test] +fn resolved_map_contains_project_name_and_runtime_version() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "Node.js"); + ctx.runtime_versions.insert("Node.js".to_string(), 
"20.x".to_string()); + ctx.project_name = "api-server".to_string(); + + let mut pipeline = minimal_pipeline(); + pipeline.project_name = "{{PROJECT_NAME}}".to_string(); + pipeline.runtime.version = "{{RUNTIME_VERSION}}".to_string(); + + let resolved = resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.project_name, "api-server"); + assert_eq!(pipeline.runtime.version, "20.x"); + assert_eq!(pipeline.unresolved_tokens.len(), 0); + assert!(resolved.contains_key("PROJECT_NAME")); + assert!(resolved.contains_key("RUNTIME_VERSION")); +} + +#[test] +fn unknown_token_is_recorded_as_unresolved() { + let dir = TempDir::new().unwrap(); + let ctx = make_base_ctx(dir.path(), "Rust"); + + let mut pipeline = minimal_pipeline(); + pipeline.install.command = "{{CUSTOM_INSTALL_CMD}}".to_string(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!(pipeline.unresolved_tokens.len(), 1); + assert_eq!(pipeline.unresolved_tokens[0].name, "CUSTOM_INSTALL_CMD"); + assert_eq!( + pipeline.unresolved_tokens[0].placeholder, + "{{CUSTOM_INSTALL_CMD}}" + ); +} + +#[test] +fn context_without_runtime_version_leaves_token_unresolved() { + let dir = TempDir::new().unwrap(); + // No runtime_versions entry → RUNTIME_VERSION has no mapping. + let ctx = make_base_ctx(dir.path(), "Python"); + + let mut pipeline = minimal_pipeline(); + pipeline.runtime.version = "{{RUNTIME_VERSION}}".to_string(); + + resolve_tokens(&ctx, &mut pipeline); + + let names: Vec<&str> = + pipeline.unresolved_tokens.iter().map(|t| t.name.as_str()).collect(); + assert!(names.contains(&"RUNTIME_VERSION"), "expected RUNTIME_VERSION in {:?}", names); +} + +#[test] +fn fully_resolved_pipeline_has_no_unresolved_tokens() { + let dir = TempDir::new().unwrap(); + // Pipeline already has concrete values — no {{TOKEN}} patterns. 
+ let ctx = make_base_ctx(dir.path(), "Go"); + let mut pipeline = minimal_pipeline(); + + resolve_tokens(&ctx, &mut pipeline); + + assert_eq!( + pipeline.unresolved_tokens.len(), + 0, + "pipeline with no placeholders should produce zero unresolved tokens" + ); +} + +// ── Monorepo strategy ───────────────────────────────────────────────────────── + +#[test] +fn monorepo_strategy_returns_none_for_single_project() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "TypeScript"); + ctx.monorepo = false; + + assert!(generate_monorepo_strategy(&ctx).is_none()); +} + +#[test] +fn monorepo_strategy_returns_none_for_fewer_than_two_packages() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "TypeScript"); + ctx.monorepo = true; + ctx.monorepo_packages = vec!["packages/api".to_string()]; + + assert!(generate_monorepo_strategy(&ctx).is_none()); +} + +#[test] +fn monorepo_strategy_produced_for_three_packages() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "TypeScript"); + ctx.monorepo = true; + ctx.monorepo_packages = vec![ + "packages/api".to_string(), + "packages/web".to_string(), + "packages/sdk".to_string(), + ]; + + let strategy = generate_monorepo_strategy(&ctx).unwrap(); + assert_eq!(strategy.packages.len(), 3); + assert!( + strategy.detect_job_yaml.contains("dorny/paths-filter"), + "detect job should reference dorny/paths-filter" + ); + assert!(strategy.matrix_job_yaml.contains("matrix")); +} + +#[test] +fn monorepo_filter_config_contains_all_package_paths() { + let dir = TempDir::new().unwrap(); + let mut ctx = make_base_ctx(dir.path(), "Go"); + ctx.monorepo = true; + ctx.monorepo_packages = + vec!["services/auth".to_string(), "services/billing".to_string()]; + + let strategy = generate_monorepo_strategy(&ctx).unwrap(); + assert!(strategy.filter_config.contains("services/auth/**")); + assert!(strategy.filter_config.contains("services/billing/**")); +} + +// ── File writer 
& conflict detection ───────────────────────────────────────── + +/// Minimal valid GitHub Actions YAML for writer tests. +fn valid_yaml() -> String { + "name: CI\non:\n push:\n branches: [main]\njobs:\n ci:\n runs-on: ubuntu-latest\n steps: []\n" + .to_string() +} + +#[test] +fn write_ci_files_creates_new_file() { + let dir = TempDir::new().unwrap(); + let files = vec![CiFile::pipeline(valid_yaml(), CiFormat::GithubActions)]; + + let summary = write_ci_files(&files, dir.path(), false).unwrap(); + + assert_eq!(summary.created(), 1, "new file should be created"); + assert_eq!(summary.skipped(), 0); + assert!(dir.path().join(".github/workflows/ci.yml").exists()); +} + +#[test] +fn write_ci_files_detects_conflict_on_different_content() { + let dir = TempDir::new().unwrap(); + let ci_dir = dir.path().join(".github/workflows"); + std::fs::create_dir_all(&ci_dir).unwrap(); + std::fs::write(ci_dir.join("ci.yml"), "name: OldPipeline\n").unwrap(); + + let files = vec![CiFile::pipeline(valid_yaml(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, dir.path(), false).unwrap(); + + assert_eq!(summary.skipped(), 1, "conflict should be recorded as skipped"); + assert!(summary.has_conflicts()); +} + +#[test] +fn write_ci_files_overwrites_when_force_is_true() { + let dir = TempDir::new().unwrap(); + let ci_dir = dir.path().join(".github/workflows"); + std::fs::create_dir_all(&ci_dir).unwrap(); + std::fs::write(ci_dir.join("ci.yml"), "name: OldPipeline\n").unwrap(); + + let files = vec![CiFile::pipeline(valid_yaml(), CiFormat::GithubActions)]; + let summary = write_ci_files(&files, dir.path(), true).unwrap(); + + assert_eq!(summary.overwritten(), 1); + assert!(!summary.has_conflicts()); +} + +#[test] +fn write_ci_files_records_invalid_yaml_outcome() { + let dir = TempDir::new().unwrap(); + let files = vec![CiFile::pipeline( + "not: valid: yaml:\n - [\n".to_string(), + CiFormat::GithubActions, + )]; + + let summary = write_ci_files(&files, dir.path(), 
false).unwrap(); + assert_eq!(summary.invalid(), 1); +} + +#[test] +fn write_ci_files_interactive_resolves_conflict_with_overwrite_choice() { + let dir = TempDir::new().unwrap(); + let ci_dir = dir.path().join(".github/workflows"); + std::fs::create_dir_all(&ci_dir).unwrap(); + std::fs::write(ci_dir.join("ci.yml"), "name: OldPipeline\n").unwrap(); + + let files = vec![CiFile::pipeline(valid_yaml(), CiFormat::GithubActions)]; + // Simulate user typing "o" then Enter. + let mut reader = Cursor::new("o\n"); + let summary = + write_ci_files_interactive(&files, dir.path(), &mut reader).unwrap(); + + assert_eq!(summary.overwritten(), 1); +} + +// ── Template rendering ──────────────────────────────────────────────────────── + +#[test] +fn github_actions_render_produces_valid_yaml() { + let output = templates::github_actions::render(&minimal_pipeline()); + serde_yaml::from_str::(&output) + .expect("GitHub Actions output must be valid YAML"); +} + +#[test] +fn azure_pipelines_render_produces_valid_yaml() { + let output = templates::azure_pipelines::render(&minimal_pipeline()); + serde_yaml::from_str::(&output) + .expect("Azure Pipelines output must be valid YAML"); +} + +#[test] +fn cloud_build_render_produces_valid_yaml() { + let output = templates::cloud_build::render(&minimal_pipeline()); + serde_yaml::from_str::(&output) + .expect("Cloud Build output must be valid YAML"); +} + +/// Snapshot test — demonstrates `insta` usage; on first run with +/// `INSTA_UPDATE=unseen cargo test` the snapshot file is created and +/// committed alongside this file. 
+#[test] +fn github_actions_render_snapshot() { + let output = templates::github_actions::render(&minimal_pipeline()); + insta::assert_snapshot!(output); +} + +// ── Coverage step ───────────────────────────────────────────────────────────── + +#[test] +fn coverage_yaml_is_valid_and_contains_codecov_action() { + let test = TestStep { + command: "pytest".to_string(), + coverage_flag: Some("--cov=.".to_string()), + coverage_report_path: Some("coverage.xml".to_string()), + }; + let step = generate_coverage_step_for(&test, CoverageService::Codecov).unwrap(); + let yaml = render_coverage_yaml(&step); + + // render_coverage_yaml returns a step snippet (not a complete YAML document); + // full-document validity is asserted by the template integration tests. + assert!(yaml.contains("codecov-action"), "should reference codecov-action"); + assert!(yaml.contains("coverage.xml"), "should embed the report path"); + assert!(yaml.contains("CODECOV_TOKEN"), "should reference the secret"); +} + +#[test] +fn coverage_secrets_doc_marks_token_as_optional() { + let test = TestStep { + command: "pytest".to_string(), + coverage_flag: Some("--cov=.".to_string()), + coverage_report_path: Some("coverage.xml".to_string()), + }; + let step = generate_coverage_step_for(&test, CoverageService::Codecov).unwrap(); + let doc = coverage_secrets_doc_entry(&step); + + assert!(doc.contains("CODECOV_TOKEN")); + assert!( + doc.to_lowercase().contains("optional"), + "CODECOV_TOKEN should be marked optional" + ); +} + +// ── Notify step ─────────────────────────────────────────────────────────────── + +#[test] +fn notify_yaml_contains_failure_condition_and_slack_action() { + let step = generate_notify_step(true).unwrap(); + let yaml = render_notify_yaml(&step); + + assert!(yaml.contains("if: failure()"), "must include `if: failure()`"); + assert!( + yaml.contains("slackapi/slack-github-action"), + "must reference the Slack action" + ); +} + +#[test] +fn notify_step_disabled_returns_none() { + 
assert!(generate_notify_step(false).is_none()); +} diff --git a/tests/fixtures/ci/go/go.mod b/tests/fixtures/ci/go/go.mod new file mode 100644 index 00000000..5dc8968f --- /dev/null +++ b/tests/fixtures/ci/go/go.mod @@ -0,0 +1,7 @@ +module github.com/test/go-app + +go 1.21 + +require ( + github.com/stretchr/testify v1.8.4 +) diff --git a/tests/fixtures/ci/java/pom.xml b/tests/fixtures/ci/java/pom.xml new file mode 100644 index 00000000..522151f9 --- /dev/null +++ b/tests/fixtures/ci/java/pom.xml @@ -0,0 +1,24 @@ + + + 4.0.0 + com.example + test-java-app + 1.0.0 + jar + + 17 + 17 + UTF-8 + + + + org.junit.jupiter + junit-jupiter + 5.10.0 + test + + + diff --git a/tests/fixtures/ci/node/package-lock.json b/tests/fixtures/ci/node/package-lock.json new file mode 100644 index 00000000..d78163bb --- /dev/null +++ b/tests/fixtures/ci/node/package-lock.json @@ -0,0 +1,7 @@ +{ + "name": "test-node-app", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} diff --git a/tests/fixtures/ci/node/package.json b/tests/fixtures/ci/node/package.json new file mode 100644 index 00000000..38329de9 --- /dev/null +++ b/tests/fixtures/ci/node/package.json @@ -0,0 +1,13 @@ +{ + "name": "test-node-app", + "version": "1.0.0", + "scripts": { + "test": "jest --passWithNoTests", + "build": "tsc -b" + }, + "devDependencies": { + "jest": "^29.0.0", + "typescript": "^5.0.0", + "@types/jest": "^29.0.0" + } +} diff --git a/tests/fixtures/ci/python/pyproject.toml b/tests/fixtures/ci/python/pyproject.toml new file mode 100644 index 00000000..41c2fb49 --- /dev/null +++ b/tests/fixtures/ci/python/pyproject.toml @@ -0,0 +1,11 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "test-python-app" +version = "0.1.0" +requires-python = ">=3.11" + +[tool.pytest.ini_options] +testpaths = ["tests"] diff --git a/tests/fixtures/ci/python/requirements.txt b/tests/fixtures/ci/python/requirements.txt new file mode 100644 index 
00000000..c86c0786 --- /dev/null +++ b/tests/fixtures/ci/python/requirements.txt @@ -0,0 +1,2 @@ +pytest>=7.0 +pytest-cov>=4.0 diff --git a/tests/fixtures/ci/rust/Cargo.toml b/tests/fixtures/ci/rust/Cargo.toml new file mode 100644 index 00000000..cb93ceaa --- /dev/null +++ b/tests/fixtures/ci/rust/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "test-rust-app" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "test-rust-app" +path = "src/main.rs" diff --git a/tests/snapshots/ci_generator_unit__github_actions_render_snapshot.snap b/tests/snapshots/ci_generator_unit__github_actions_render_snapshot.snap new file mode 100644 index 00000000..8584eeb5 --- /dev/null +++ b/tests/snapshots/ci_generator_unit__github_actions_render_snapshot.snap @@ -0,0 +1,28 @@ +--- +source: tests/ci_generator_unit.rs +expression: output +--- +name: CI +on: + push: + branches: + - main + pull_request: + branches: + - main +jobs: + ci: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up runtime + uses: actions/setup-node@v4 + with: + node-version: '20' + - name: Install dependencies + run: npm ci + - name: Test + run: npm test + - uses: gitleaks/gitleaks-action@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}