From e60ab1053e68fcd2c5d276b4e93e99a42ecb717b Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:49:17 -0700 Subject: [PATCH 01/25] chore(terraform): ignore .terraform/ cache and pin tflint ruleset Co-Authored-By: Claude Opus 4.7 (1M context) --- .gitignore | 1 + terraform/.tflint.hcl | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 terraform/.tflint.hcl diff --git a/.gitignore b/.gitignore index 66332b3..d0c5767 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ docs/plans/ !.dev.vars.example .wrangler/ .claude/ +.terraform/ diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl new file mode 100644 index 0000000..427121c --- /dev/null +++ b/terraform/.tflint.hcl @@ -0,0 +1,4 @@ +plugin "terraform" { + enabled = true + preset = "recommended" +} From 99b7b219e6629d6cc23c0b07a9cfddc61012cdad Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:50:17 -0700 Subject: [PATCH 02/25] chore(tooling): install terraform + tflint in devcontainer and configure Zed LSP Co-Authored-By: Claude Opus 4.7 (1M context) --- .devcontainer/devcontainer.json | 18 ++++++++++++++++-- .zed/settings.json | 12 ++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 19cbb00..6fce328 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,13 +2,18 @@ "name": "task-action", "image": "mcr.microsoft.com/devcontainers/typescript-node:24", "features": { - "ghcr.io/devcontainers/features/github-cli:1": {} + "ghcr.io/devcontainers/features/github-cli:1": {}, + "ghcr.io/devcontainers/features/terraform:1": { + "version": "1.9.8", + "tflint": "0.53.0" + } }, "postCreateCommand": "npm install", "customizations": { "vscode": { "extensions": [ - "biomejs.biome" + "biomejs.biome", + "hashicorp.terraform" ], "settings": { 
"editor.defaultFormatter": "biomejs.biome", @@ -16,6 +21,15 @@ "editor.tabSize": 2, "[typescript]": { "editor.defaultFormatter": "biomejs.biome" + }, + "[terraform]": { + "editor.defaultFormatter": "hashicorp.terraform", + "editor.formatOnSave": true, + "editor.tabSize": 2 + }, + "[terraform-vars]": { + "editor.defaultFormatter": "hashicorp.terraform", + "editor.formatOnSave": true } } } diff --git a/.zed/settings.json b/.zed/settings.json index 292cc49..0a50593 100644 --- a/.zed/settings.json +++ b/.zed/settings.json @@ -26,6 +26,18 @@ }, "JSONC": { "formatter": { "language_server": { "name": "biome" } } + }, + "Terraform": { + "format_on_save": "on", + "formatter": "language_server", + "language_servers": ["terraform-ls"], + "tab_size": 2 + }, + "Terraform Vars": { + "format_on_save": "on", + "formatter": "language_server", + "language_servers": ["terraform-ls"], + "tab_size": 2 } } } From 8c3dde934cff88565aed579cce8b95a4af739518 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:51:36 -0700 Subject: [PATCH 03/25] ci(terraform): add lint PR gate and task-beta template deploy workflows Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/workflows/deploy-template.yaml | 41 ++++++++++++++++++++++++++ .github/workflows/terraform-lint.yaml | 39 ++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 .github/workflows/deploy-template.yaml create mode 100644 .github/workflows/terraform-lint.yaml diff --git a/.github/workflows/deploy-template.yaml b/.github/workflows/deploy-template.yaml new file mode 100644 index 0000000..9bb015d --- /dev/null +++ b/.github/workflows/deploy-template.yaml @@ -0,0 +1,41 @@ +name: Deploy Coder Template + +on: + push: + branches: [main] + paths: + - "terraform/**" + - ".github/workflows/deploy-template.yaml" + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: deploy-template + cancel-in-progress: false + +jobs: + deploy: + 
runs-on: ubuntu-latest + environment: coder + env: + CODER_URL: ${{ secrets.CODER_URL }} + CODER_SESSION_TOKEN: ${{ secrets.CODER_SESSION_TOKEN }} + TEMPLATE_NAME: task-beta + steps: + - uses: actions/checkout@v4 + + - name: Install Coder CLI + run: curl -fsSL https://coder.com/install.sh | sh -s -- --version 2.18.2 + + - name: Verify Coder auth + run: coder users show me + + - name: Push template + run: | + coder templates push "$TEMPLATE_NAME" \ + --directory ./terraform \ + --yes \ + --name "run-${GITHUB_RUN_ID}" \ + --message "Deploy from ${GITHUB_SHA::7}" diff --git a/.github/workflows/terraform-lint.yaml b/.github/workflows/terraform-lint.yaml new file mode 100644 index 0000000..95b25dd --- /dev/null +++ b/.github/workflows/terraform-lint.yaml @@ -0,0 +1,39 @@ +name: Terraform Lint + +on: + pull_request: + paths: + - "terraform/**" + - ".github/workflows/terraform-lint.yaml" + +permissions: + contents: read + +jobs: + lint: + runs-on: ubuntu-latest + defaults: + run: + working-directory: terraform + steps: + - uses: actions/checkout@v4 + + - uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.9.8" + terraform_wrapper: false + + - name: terraform fmt + run: terraform fmt -check -recursive -diff + + - uses: terraform-linters/setup-tflint@v4 + with: + tflint_version: v0.53.0 + + - name: tflint --init + run: tflint --init + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: tflint + run: tflint --recursive --format compact From b304bac25be2bd60897a584c678338d2d8f346ee Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:52:36 -0700 Subject: [PATCH 04/25] docs: document task-beta Coder template and its CI workflows Co-Authored-By: Claude Opus 4.7 (1M context) --- AGENTS.md | 17 +++++++++++++++++ README.md | 15 +++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index 04ff8a8..5223b56 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -103,6 +103,23 @@ 
Cross-cutting: - **[docs/gotchas.md](docs/gotchas.md)** — collected foot-guns with context. Read before non-trivial changes. - **[docs/testing.md](docs/testing.md)** — test layers, `introspectWorkflow` patterns, fetch-mocking options. +## Terraform / Coder template + +The Coder template lives in [`terraform/`](terraform/) (template name +`task-beta`). Two workflows guard it: + +- [`.github/workflows/terraform-lint.yaml`](.github/workflows/terraform-lint.yaml) + — PR gate: `terraform fmt -check` + `tflint --recursive` (config in + [`terraform/.tflint.hcl`](terraform/.tflint.hcl)). +- [`.github/workflows/deploy-template.yaml`](.github/workflows/deploy-template.yaml) + — `main` push + manual dispatch: runs + `coder templates push task-beta --directory ./terraform`, authenticated with + the `CODER_URL` + `CODER_SESSION_TOKEN` repo secrets. + +Local: the devcontainer installs `terraform` + `tflint` at the same versions +CI pins (1.9.8 / 0.53.0). `.terraform/` is gitignored; +`terraform/.terraform.lock.hcl` is committed. + ## How to extend - **[docs/adding-an-event-type.md](docs/adding-an-event-type.md)** — checklist for wiring a new GitHub event into the router + a new step factory + tests. diff --git a/README.md b/README.md index b7becc9..c86dc4c 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,21 @@ Two GitHub identities work together: See [docs/github-app-setup.md](docs/github-app-setup.md) for step-by-step instructions: creating the GitHub App, configuring webhook delivery, and installing it on your repositories. +## Coder Template + +This repo also ships the Coder template that provisions the ephemeral workspace +a task runs in. The template lives in [`terraform/`](terraform/README.md) and is +named **`task-beta`** on the Coder deployment. + +- [`terraform-lint.yaml`](.github/workflows/terraform-lint.yaml) gates PRs that + touch `terraform/**` on `terraform fmt` and `tflint`. 
+- [`deploy-template.yaml`](.github/workflows/deploy-template.yaml) pushes the + template to the Coder deployment (via `coder templates push task-beta`) on + every merge to `main` that touches `terraform/**`, and on manual dispatch. + +See [`terraform/README.md`](terraform/README.md) for what the template +provisions. + ## Configuration All non-secret config lives in [`wrangler.toml`](wrangler.toml) under `[vars]`. Secrets are provisioned via `wrangler secret put` in production and `.dev.vars` locally (see [`.dev.vars.example`](.dev.vars.example)). From 5f73eff9eb93132932668bc259f67f6501451207 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 19:51:08 -0700 Subject: [PATCH 05/25] ci(terraform): add terraform test harness with mocked providers Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/terraform-lint.yaml | 6 +++ terraform/outputs.tf | 18 ++++++++ terraform/tests/task-metadata.tftest.hcl | 52 ++++++++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 terraform/outputs.tf create mode 100644 terraform/tests/task-metadata.tftest.hcl diff --git a/.github/workflows/terraform-lint.yaml b/.github/workflows/terraform-lint.yaml index 95b25dd..a16df22 100644 --- a/.github/workflows/terraform-lint.yaml +++ b/.github/workflows/terraform-lint.yaml @@ -26,6 +26,12 @@ jobs: - name: terraform fmt run: terraform fmt -check -recursive -diff + - name: terraform init + run: terraform init -backend=false + + - name: terraform test + run: terraform test + - uses: terraform-linters/setup-tflint@v4 with: tflint_version: v0.53.0 diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000..0966ec2 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,18 @@ +# ─── Test-introspection outputs ────────────────────────────────────────────── +# Consumed by terraform/tests/*.tftest.hcl. 
Every key uses try() so this file +# stays valid as later phases introduce additional locals. + +output "task_metadata" { + value = { + repo_url = try(local.repo_url, "") + repo_name = try(local.repo_name, "") + ai_prompt = try(local.ai_prompt, "") + base_branch = try(local.base_branch, "") + size = try(local.size, "") + docker = try(local.docker, false) + extra_volumes = try(local.extra_volumes, []) + work_dir = try(local.work_dir, "") + git_url = try(local.git_url, "") + json_valid = try(local.json_valid, false) + } +} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl new file mode 100644 index 0000000..86a60bc --- /dev/null +++ b/terraform/tests/task-metadata.tftest.hcl @@ -0,0 +1,52 @@ +# ─── Mock providers ────────────────────────────────────────────────────────── + +mock_provider "coder" { + override_data { + target = data.coder_workspace.me + values = { + start_count = 0 + name = "test" + id = "00000000-0000-0000-0000-000000000000" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_workspace_owner.me + values = { + full_name = "Test User" + email = "test@example.test" + name = "test" + } + } +} + +mock_provider "kubernetes" {} + +# ─── Shared variables ──────────────────────────────────────────────────────── + +variables { + claude_code_oauth_token = "fake-oauth-token" + github_pat = "fake-pat" +} + +# ─── Smoke test ────────────────────────────────────────────────────────────── + +run "golden_path_parses" { + command = plan + + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" + } + } + + assert { + condition = output.task_metadata.repo_url == "https://github.com/acme/widget" + error_message = "repo_url did not round-trip from JSON prompt" + } + assert { + condition = output.task_metadata.json_valid == true + error_message = "json_valid should be true 
for a well-formed JSON prompt" + } +} From 88bccb4bde561c6792fe19c3aeb938d0696d14d6 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 19:53:39 -0700 Subject: [PATCH 06/25] test(terraform): add failing parsing and precondition tests Co-Authored-By: Claude Sonnet 4.6 --- terraform/tests/task-metadata.tftest.hcl | 152 +++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 86a60bc..217e4c3 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -50,3 +50,155 @@ run "golden_path_parses" { error_message = "json_valid should be true for a well-formed JSON prompt" } } + +# ─── Precondition firing ───────────────────────────────────────────────────── + +run "invalid_json_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000001" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { prompt = "not-json" } + } + + expect_failures = [resource.coder_agent.dev] +} + +run "blank_repo_url_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000002" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { prompt = "{\"repo_url\":\"\",\"repo_name\":\"x\",\"ai_prompt\":\"y\"}" } + } + + expect_failures = [resource.coder_agent.dev] +} + +run "blank_repo_name_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000003" + access_url = "https://example.test" + } + } + override_data { + target = 
data.coder_task.me + values = { prompt = "{\"repo_url\":\"https://github.com/a/b\",\"repo_name\":\"\",\"ai_prompt\":\"y\"}" } + } + + expect_failures = [resource.coder_agent.dev] +} + +run "blank_ai_prompt_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000004" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { prompt = "{\"repo_url\":\"https://github.com/a/b\",\"repo_name\":\"b\",\"ai_prompt\":\"\"}" } + } + + expect_failures = [resource.coder_agent.dev] +} + +# ─── Defaults and derivations ──────────────────────────────────────────────── + +run "defaults_applied_when_optionals_absent" { + command = plan + + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" + } + } + + assert { + condition = output.task_metadata.size == "large" + error_message = "size must default to 'large' when absent (EARS-6)" + } + assert { + condition = output.task_metadata.docker == false + error_message = "docker must default to false when absent" + } + assert { + condition = output.task_metadata.base_branch == "" + error_message = "base_branch must default to empty string when absent" + } + assert { + condition = length(output.task_metadata.extra_volumes) == 0 + error_message = "extra_volumes must default to empty list when absent" + } + assert { + condition = output.task_metadata.work_dir == "/workspaces/widget" + error_message = "work_dir must be /workspaces/ (EARS-16)" + } + assert { + condition = output.task_metadata.git_url == "https://github.com/acme/widget" + error_message = "git_url must equal repo_url when base_branch is empty (EARS-14)" + } +} + +run "base_branch_composes_git_url_suffix" { + command = plan + + override_data { + target = data.coder_task.me + values = { + 
prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\",\"base_branch\":\"feature-x\"}" + } + } + + assert { + condition = output.task_metadata.git_url == "https://github.com/acme/widget#refs/heads/feature-x" + error_message = "git_url must append #refs/heads/ when base_branch is set (EARS-14)" + } +} + +run "ai_prompt_passthrough_no_wrapping" { + command = plan + + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"LITERAL_PROMPT_TOKEN\"}" + } + } + + assert { + condition = output.task_metadata.ai_prompt == "LITERAL_PROMPT_TOKEN" + error_message = "ai_prompt must be passed through verbatim, no template wrapping (EARS-15)" + } +} From d9c460ae70a88c3ad483037bacd124d24e4c1e8c Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 19:56:16 -0700 Subject: [PATCH 07/25] refactor(terraform): decode TaskMetadata JSON from prompt with fail-hard preconditions Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 306 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 306 insertions(+) create mode 100644 terraform/main.tf diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000..26c9e74 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,306 @@ +terraform { + required_providers { + coder = { source = "coder/coder" } + kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } + } +} + +provider "kubernetes" { + # Coder injects cluster credentials via the provisioner's service account +} + +# ─── Variables (baked into the template, not user-facing) ──────────────────── + +variable "claude_code_oauth_token" { + type = string + sensitive = true + description = "Claude Code OAuth token for AI agent authentication" + default = "" +} + +variable "github_pat" { + type = string + sensitive = true + 
description = "GitHub PAT for a non-org-member service account. Used to fork repos, comment on issues, and create cross-fork PRs." +} + +variable "ai_provider" { + type = string + description = "AI coding agent: claude_code or codex" + default = "claude_code" + + validation { + condition = contains(["claude_code", "codex"], var.ai_provider) + error_message = "ai_provider must be 'claude_code' or 'codex'" + } +} + +variable "codex_auth_token_json" { + type = string + sensitive = true + description = "Base64-encoded Codex auth.json for CI/CD file-based authentication" + default = "" +} + +# ─── Data sources ───────────────────────────────────────────────────────────── + +data "coder_workspace" "me" {} +data "coder_workspace_owner" "me" {} +data "coder_task" "me" {} + +# ─── Locals ─────────────────────────────────────────────────────────────────── + +locals { + use_claude = var.ai_provider == "claude_code" + + # ── Prompt decode ────────────────────────────────────────────────────── + raw_prompt = data.coder_task.me.prompt + parsed = try(jsondecode(local.raw_prompt), null) + json_valid = local.parsed != null + + # ── Required fields (validated in preconditions) ─────────────────────── + repo_url = try(local.parsed.repo_url, "") + repo_name = try(local.parsed.repo_name, "") + ai_prompt = try(local.parsed.ai_prompt, "") + + # ── Optional fields (defaults applied here) ──────────────────────────── + base_branch = try(local.parsed.base_branch, "") + size = try(local.parsed.size, "large") + docker = try(local.parsed.docker, false) + extra_volumes = try(local.parsed.extra_volumes, []) + + # ── Derived ──────────────────────────────────────────────────────────── + work_dir = "/workspaces/${local.repo_name}" + git_url = local.base_branch == "" ? 
local.repo_url : "${local.repo_url}#refs/heads/${local.base_branch}" +} + +# ─── Coder Agent ───────────────────────────────────────────────────────────── + +resource "coder_agent" "dev" { + count = data.coder_workspace.me.start_count + arch = "amd64" + auth = "token" + os = "linux" + dir = local.work_dir + connection_timeout = 600 + + lifecycle { + precondition { + condition = local.json_valid + error_message = "data.coder_task.me.prompt must be valid JSON matching the TaskMetadata schema" + } + precondition { + condition = local.repo_url != "" + error_message = "TaskMetadata.repo_url is required and must be non-blank" + } + precondition { + condition = local.repo_name != "" + error_message = "TaskMetadata.repo_name is required and must be non-blank" + } + precondition { + condition = local.ai_prompt != "" + error_message = "TaskMetadata.ai_prompt is required and must be non-blank" + } + precondition { + condition = !local.use_claude || var.claude_code_oauth_token != "" + error_message = "claude_code_oauth_token is required when ai_provider is claude_code" + } + precondition { + condition = local.use_claude || var.codex_auth_token_json != "" + error_message = "codex_auth_token_json is required when ai_provider is codex" + } + } + + env = { + GITHUB_TOKEN = var.github_pat + } + + startup_script = <<-EOT + # Trust GitHub's SSH host key so git operations don't prompt + mkdir -p ~/.ssh && chmod 700 ~/.ssh + ssh-keyscan -t ed25519 github.com >> ~/.ssh/known_hosts 2>/dev/null + + # Install gh CLI if missing (Debian; https://github.com/cli/cli/blob/trunk/docs/install_linux.md#debian) + if ! 
command -v gh >/dev/null 2>&1; then + SUDO="" + [ "$(id -u)" -ne 0 ] && SUDO="sudo" + (type -p wget >/dev/null || ($SUDO apt update && $SUDO apt-get install wget -y)) \ + && $SUDO mkdir -p -m 755 /etc/apt/keyrings \ + && out=$(mktemp) && wget -nv -O"$out" https://cli.github.com/packages/githubcli-archive-keyring.gpg \ + && cat "$out" | $SUDO tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \ + && $SUDO chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \ + && $SUDO mkdir -p -m 755 /etc/apt/sources.list.d \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | $SUDO tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ + && $SUDO apt update \ + && $SUDO apt install gh -y + fi + + # Authenticate gh CLI with the baked-in PAT and configure git credentials + echo "$GITHUB_TOKEN" | gh auth login --with-token + gh auth setup-git + + # Configure git identity from the GitHub PAT user + git config --global user.name "$(gh api user --jq .login)" + git config --global user.email "$(gh api user --jq '.id | tostring + "+\(.login)@users.noreply.github.com"')" + EOT + + metadata { + key = "cpu" + display_name = "CPU Usage" + interval = 5 + timeout = 5 + script = "coder stat cpu" + } + + metadata { + key = "memory" + display_name = "Memory Usage" + interval = 5 + timeout = 5 + script = "coder stat mem" + } +} + +# ─── Claude Code ────────────────────────────────────────────────────────���──── + +module "claude-code" { + count = local.use_claude ? data.coder_workspace.me.start_count : 0 + source = "./modules/claude-code-agent" + + start_count = 1 + agent_id = coder_agent.dev[0].id + work_dir = local.work_dir + oauth_token = var.claude_code_oauth_token + ai_prompt = local.ai_prompt + +} + +# ─── Codex ─────────────────────────────────────────────────────────────────── + +module "codex" { + count = local.use_claude ? 
0 : data.coder_workspace.me.start_count + source = "registry.coder.com/coder-labs/codex/coder" + version = "4.3.1" + agent_id = coder_agent.dev[count.index].id + workdir = local.work_dir + ai_prompt = replace(local.ai_prompt, "/coder-task", "$coder-task") + + pre_install_script = <<-EOT + # Symlink persistent agent state + mkdir -p /persist/agent-state/codex /persist/agent-state/codex-module + ln -sfn /persist/agent-state/codex "$HOME/.codex" + ln -sfn /persist/agent-state/codex-module "$HOME/.codex-module" + + # Install code-factory plugin for Codex + git clone --depth 1 https://github.com/xmtplabs/code-factory.git /tmp/code-factory + + mkdir -p ~/.agents/skills + cp -R /tmp/code-factory/skills/* ~/.agents/skills/ + + mkdir -p ~/.codex/agents + cp -R /tmp/code-factory/.codex/agents/* ~/.codex/agents/ + + mkdir -p ~/.agents/plugins/plugins + cp -R /tmp/code-factory ~/.agents/plugins/plugins/code-factory + cat > ~/.agents/plugins/marketplace.json <<'MKJSON' + { + "name": "personal-plugins", + "interface": { + "displayName": "Personal Plugins" + }, + "plugins": [ + { + "name": "code-factory", + "source": { + "source": "local", + "path": "./plugins/code-factory" + }, + "policy": { + "installation": "INSTALLED_BY_DEFAULT", + "authentication": "ON_INSTALL" + }, + "category": "Development" + } + ] + } + MKJSON + + rm -rf /tmp/code-factory + EOT + + post_install_script = <<-EOT + echo -n '${var.codex_auth_token_json}' | base64 -d > "$HOME/.codex/auth.json" + chmod 600 "$HOME/.codex/auth.json" + EOT + + base_config_toml = <<-EOT + sandbox_mode = "danger-full-access" + approval_policy = "never" + cli_auth_credentials_store = "file" + [projects."${local.work_dir}"] + trust_level = "trusted" + EOT +} + +# ─── Workspace Pod ──────────────────────────────────��──────────────────────── + +module "workspace" { + source = "./modules/workspace-pod" + + workspace_name = data.coder_workspace.me.name + workspace_id = data.coder_workspace.me.id + start_count = 
data.coder_workspace.me.start_count + owner_name = data.coder_workspace_owner.me.full_name + owner_email = data.coder_workspace_owner.me.email + owner_username = data.coder_workspace_owner.me.name + + agent_token = try(coder_agent.dev[0].token, "") + agent_init_script = try(coder_agent.dev[0].init_script, "") + access_url = data.coder_workspace.me.access_url + + deployment_type = "pod" + restart_policy = "Never" + termination_grace_period_seconds = 30 + do_not_disrupt = true + git_url = local.git_url + workspace_size = "30Gi" + app_name = "coder-task" + name_prefix = "task" + + dev_resources = { + requests = { cpu = "2", memory = "8Gi", "ephemeral-storage" = "30Gi" } + limits = { cpu = "8", memory = "24Gi", "ephemeral-storage" = "50Gi" } + } + + dind_resources = { + requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "5Gi" } + limits = { cpu = "2", memory = "4Gi", "ephemeral-storage" = "20Gi" } + } + + volumes = [ + { name = "docker-cache", size = "10Gi", mount_path = "/var/lib/docker", persistent = false, containers = "dind" }, + ] +} + +# ─── AI Task ───────────────────────────────────────────────────────���───────── + +resource "coder_ai_task" "task" { + count = data.coder_workspace.me.start_count + app_id = local.use_claude ? 
module.claude-code[0].task_app_id : module.codex[0].task_app_id +} + +# ─── Dashboard metadata ─────────────────────────────��──────────────────────── + +resource "coder_metadata" "task_info" { + count = data.coder_workspace.me.start_count + resource_id = coder_agent.dev[count.index].id + + item { + key = "pod" + value = module.workspace.pod_name + } + item { + key = "repo" + value = local.repo_url + } +} From 8dae2b83667650cbfc5db6e061a8b1e64c7df77a Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:02:00 -0700 Subject: [PATCH 08/25] fix(terraform): tighten json_valid, null-safe base_branch, and backfill EARS-17 test Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 11 ++-- terraform/tests/task-metadata.tftest.hcl | 70 ++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 5 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 26c9e74..51c3c6f 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -56,7 +56,7 @@ locals { # ── Prompt decode ────────────────────────────────────────────────────── raw_prompt = data.coder_task.me.prompt parsed = try(jsondecode(local.raw_prompt), null) - json_valid = local.parsed != null + json_valid = can(local.parsed.repo_url) && can(local.parsed.repo_name) && can(local.parsed.ai_prompt) # ── Required fields (validated in preconditions) ─────────────────────── repo_url = try(local.parsed.repo_url, "") @@ -64,10 +64,11 @@ locals { ai_prompt = try(local.parsed.ai_prompt, "") # ── Optional fields (defaults applied here) ──────────────────────────── - base_branch = try(local.parsed.base_branch, "") - size = try(local.parsed.size, "large") - docker = try(local.parsed.docker, false) - extra_volumes = try(local.parsed.extra_volumes, []) + base_branch_raw = try(local.parsed.base_branch, null) + base_branch = local.base_branch_raw == null ? 
"" : local.base_branch_raw + size = try(local.parsed.size, "large") + docker = try(local.parsed.docker, false) + extra_volumes = try(local.parsed.extra_volumes, []) # ── Derived ──────────────────────────────────────────────────────────── work_dir = "/workspaces/${local.repo_name}" diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 217e4c3..cc30a04 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -34,6 +34,15 @@ variables { run "golden_path_parses" { command = plan + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000000" + access_url = "https://example.test" + } + } override_data { target = data.coder_task.me values = { @@ -52,6 +61,13 @@ run "golden_path_parses" { } # ─── Precondition firing ───────────────────────────────────────────────────── +# +# Each fixture below violates EXACTLY ONE precondition. Other required fields +# remain non-blank and the JSON remains structurally valid so the NAMED +# precondition is the one that trips — not a sibling. When adding new +# preconditions or reordering the `lifecycle.precondition` list in +# terraform/main.tf, update these fixtures in lockstep; otherwise +# `expect_failures` may pass for the wrong reason. 
run "invalid_json_fails_precondition" { command = plan @@ -138,6 +154,15 @@ run "blank_ai_prompt_fails_precondition" { run "defaults_applied_when_optionals_absent" { command = plan + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000005" + access_url = "https://example.test" + } + } override_data { target = data.coder_task.me values = { @@ -174,6 +199,15 @@ run "defaults_applied_when_optionals_absent" { run "base_branch_composes_git_url_suffix" { command = plan + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000006" + access_url = "https://example.test" + } + } override_data { target = data.coder_task.me values = { @@ -190,6 +224,15 @@ run "base_branch_composes_git_url_suffix" { run "ai_prompt_passthrough_no_wrapping" { command = plan + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000007" + access_url = "https://example.test" + } + } override_data { target = data.coder_task.me values = { @@ -202,3 +245,30 @@ run "ai_prompt_passthrough_no_wrapping" { error_message = "ai_prompt must be passed through verbatim, no template wrapping (EARS-15)" } } + +# ─── Dashboard metadata ────────────────────────────────────────────────────── + +run "coder_metadata_exposes_repo_url" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000017" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" + } + } + + assert { + condition = length([for i in coder_metadata.task_info[0].item : i if i.key == "repo" && i.value == "https://github.com/acme/widget"]) == 1 + 
error_message = "coder_metadata.task_info must expose repo_url via a 'repo' item (EARS-17)" + } +} From 906e750dfc0b49fb4602df9a233594cca69361be Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:04:37 -0700 Subject: [PATCH 09/25] test(terraform): cover non-object JSON for EARS-1 regression Co-Authored-By: Claude Sonnet 4.6 --- terraform/tests/task-metadata.tftest.hcl | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index cc30a04..e10b1ee 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -89,6 +89,29 @@ run "invalid_json_fails_precondition" { expect_failures = [resource.coder_agent.dev] } +run "non_object_json_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000008" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { prompt = "[1,2,3]" } + } + + # Valid JSON but not a TaskMetadata object. json_valid must be false so + # EARS-1 trips (not EARS-2 via try() returning ""). Guards the can()-based + # tightening of local.json_valid in terraform/main.tf against regression. 
+ expect_failures = [resource.coder_agent.dev] +} + run "blank_repo_url_fails_precondition" { command = plan From 9f9a588b1d34719e2935f0008f2bec9719d59926 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:06:52 -0700 Subject: [PATCH 10/25] test(terraform): add failing size-profile and dind-constant tests Co-Authored-By: Claude Sonnet 4.6 --- terraform/outputs.tf | 8 + terraform/tests/task-metadata.tftest.hcl | 199 +++++++++++++++++++++++ 2 files changed, 207 insertions(+) diff --git a/terraform/outputs.tf b/terraform/outputs.tf index 0966ec2..7764cb5 100644 --- a/terraform/outputs.tf +++ b/terraform/outputs.tf @@ -16,3 +16,11 @@ output "task_metadata" { json_valid = try(local.json_valid, false) } } + +output "dev_resources" { + value = try(local.dev_resources, null) +} + +output "dind_resources" { + value = try(local.dind_resources, null) +} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index e10b1ee..20c3bc2 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -295,3 +295,202 @@ run "coder_metadata_exposes_repo_url" { error_message = "coder_metadata.task_info must expose repo_url via a 'repo' item (EARS-17)" } } + +# ─── Size profiles ─────────────────────────────────────────────────────────── + +run "size_default_large_profile" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000009" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" + } + } + + assert { + condition = output.dev_resources.requests.cpu == "2" + error_message = "default size (absent) must apply the large profile — requests.cpu (EARS-6, EARS-9)" + } + assert { + 
condition = output.dev_resources.requests.memory == "8Gi" + error_message = "default (large) requests.memory (EARS-9)" + } + assert { + condition = output.dev_resources.requests["ephemeral-storage"] == "30Gi" + error_message = "default (large) requests.ephemeral-storage (EARS-9)" + } + assert { + condition = output.dev_resources.limits.cpu == "8" + error_message = "default (large) limits.cpu (EARS-9)" + } + assert { + condition = output.dev_resources.limits.memory == "24Gi" + error_message = "default (large) limits.memory (EARS-9)" + } + assert { + condition = output.dev_resources.limits["ephemeral-storage"] == "50Gi" + error_message = "default (large) limits.ephemeral-storage (EARS-9)" + } +} + +run "size_small_profile" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000010" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"small\"}" + } + } + + assert { + condition = output.dev_resources.requests.cpu == "1" && output.dev_resources.requests.memory == "4Gi" && output.dev_resources.requests["ephemeral-storage"] == "10Gi" + error_message = "small profile requests mismatch (EARS-7): expected {cpu=1, memory=4Gi, ephemeral-storage=10Gi}" + } + assert { + condition = output.dev_resources.limits.cpu == "4" && output.dev_resources.limits.memory == "8Gi" && output.dev_resources.limits["ephemeral-storage"] == "20Gi" + error_message = "small profile limits mismatch (EARS-7): expected {cpu=4, memory=8Gi, ephemeral-storage=20Gi}" + } +} + +run "size_medium_profile" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000011" + access_url = "https://example.test" + } + } + override_data { + target = 
data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"medium\"}" + } + } + + assert { + condition = output.dev_resources.requests.cpu == "1" && output.dev_resources.requests.memory == "4Gi" && output.dev_resources.requests["ephemeral-storage"] == "20Gi" + error_message = "medium profile requests mismatch (EARS-8): expected {cpu=1, memory=4Gi, ephemeral-storage=20Gi}" + } + assert { + condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "12Gi" && output.dev_resources.limits["ephemeral-storage"] == "30Gi" + error_message = "medium profile limits mismatch (EARS-8): expected {cpu=8, memory=12Gi, ephemeral-storage=30Gi}" + } +} + +run "size_large_profile_explicit" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000012" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"large\"}" + } + } + + assert { + condition = output.dev_resources.requests.cpu == "2" && output.dev_resources.requests.memory == "8Gi" && output.dev_resources.requests["ephemeral-storage"] == "30Gi" + error_message = "explicit large profile requests mismatch (EARS-9)" + } + assert { + condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "24Gi" && output.dev_resources.limits["ephemeral-storage"] == "50Gi" + error_message = "explicit large profile limits mismatch (EARS-9)" + } +} + +run "size_invalid_fails_precondition" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000013" + access_url = "https://example.test" + } + } + override_data { + target = 
data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"xl\"}" + } + } + + # Violates the size-allowlist precondition ONLY — other required fields + # remain non-blank and the JSON is valid. Maintains the one-violation-per- + # fixture invariant documented above the Phase 2 precondition block. + expect_failures = [resource.coder_agent.dev] +} + +run "dind_resources_constant_across_sizes" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000014" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"small\"}" + } + } + + # Under all three sizes the dind sidecar profile MUST be identical. We sample + # one size here; the other two sizes' size_*_profile runs implicitly cover + # the invariant by having the same dind assertion pass. 
(EARS-10) + assert { + condition = output.dind_resources.requests.cpu == "250m" + error_message = "dind requests.cpu must be 250m across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.requests.memory == "1Gi" + error_message = "dind requests.memory must be 1Gi across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.limits.cpu == "2" + error_message = "dind limits.cpu must be 2 across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.limits.memory == "4Gi" + error_message = "dind limits.memory must be 4Gi across all sizes (EARS-10)" + } +} From 2851766acf76a274eec5cb1044a93e31967a9c03 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:08:29 -0700 Subject: [PATCH 11/25] feat(terraform): add small/medium/large size profiles (default large) Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 51c3c6f..2384aeb 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -73,6 +73,29 @@ locals { # ── Derived ──────────────────────────────────────────────────────────── work_dir = "/workspaces/${local.repo_name}" git_url = local.base_branch == "" ? 
local.repo_url : "${local.repo_url}#refs/heads/${local.base_branch}" + + # ── Resource profiles (dev container) ──────────────────────────────────── + size_profiles = { + small = { + requests = { cpu = "1", memory = "4Gi", "ephemeral-storage" = "10Gi" } + limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "20Gi" } + } + medium = { + requests = { cpu = "1", memory = "4Gi", "ephemeral-storage" = "20Gi" } + limits = { cpu = "8", memory = "12Gi", "ephemeral-storage" = "30Gi" } + } + large = { + requests = { cpu = "2", memory = "8Gi", "ephemeral-storage" = "30Gi" } + limits = { cpu = "8", memory = "24Gi", "ephemeral-storage" = "50Gi" } + } + } + dev_resources = try(local.size_profiles[local.size], local.size_profiles["large"]) + + # ── dind resources (constant across sizes) ─────────────────────────────── + dind_resources = { + requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "5Gi" } + limits = { cpu = "2", memory = "4Gi", "ephemeral-storage" = "20Gi" } + } } # ─── Coder Agent ───────────────────────────────────────────────────────────── @@ -102,6 +125,10 @@ resource "coder_agent" "dev" { condition = local.ai_prompt != "" error_message = "TaskMetadata.ai_prompt is required and must be non-blank" } + precondition { + condition = contains(["small", "medium", "large"], local.size) + error_message = "TaskMetadata.size must be one of 'small', 'medium', 'large'" + } precondition { condition = !local.use_claude || var.claude_code_oauth_token != "" error_message = "claude_code_oauth_token is required when ai_provider is claude_code" @@ -268,15 +295,8 @@ module "workspace" { app_name = "coder-task" name_prefix = "task" - dev_resources = { - requests = { cpu = "2", memory = "8Gi", "ephemeral-storage" = "30Gi" } - limits = { cpu = "8", memory = "24Gi", "ephemeral-storage" = "50Gi" } - } - - dind_resources = { - requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "5Gi" } - limits = { cpu = "2", memory = "4Gi", "ephemeral-storage" = "20Gi" } - } 
+ dev_resources = local.dev_resources + dind_resources = local.dind_resources volumes = [ { name = "docker-cache", size = "10Gi", mount_path = "/var/lib/docker", persistent = false, containers = "dind" }, From 91e93bfd890a3ba5f4f2b1090615a5a05a5c7ec7 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:11:25 -0700 Subject: [PATCH 12/25] test(terraform): verify dind constancy at each size and cover ephemeral-storage Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 2 + terraform/tests/task-metadata.tftest.hcl | 66 ++++++++++-------------- 2 files changed, 29 insertions(+), 39 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 2384aeb..501d8e0 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -89,6 +89,8 @@ locals { limits = { cpu = "8", memory = "24Gi", "ephemeral-storage" = "50Gi" } } } + # Fallback keeps the map lookup from erroring before the size precondition + # below can fire with a clean error message. dev_resources = try(local.size_profiles[local.size], local.size_profiles["large"]) # ── dind resources (constant across sizes) ─────────────────────────────── diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 20c3bc2..d87e63b 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -370,6 +370,15 @@ run "size_small_profile" { condition = output.dev_resources.limits.cpu == "4" && output.dev_resources.limits.memory == "8Gi" && output.dev_resources.limits["ephemeral-storage"] == "20Gi" error_message = "small profile limits mismatch (EARS-7): expected {cpu=4, memory=8Gi, ephemeral-storage=20Gi}" } + # EARS-10: dind resources must be identical across all sizes. 
+ assert { + condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" + error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" + error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" + } } run "size_medium_profile" { @@ -399,6 +408,15 @@ run "size_medium_profile" { condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "12Gi" && output.dev_resources.limits["ephemeral-storage"] == "30Gi" error_message = "medium profile limits mismatch (EARS-8): expected {cpu=8, memory=12Gi, ephemeral-storage=30Gi}" } + # EARS-10: dind resources must be identical across all sizes. + assert { + condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" + error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" + error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" + } } run "size_large_profile_explicit" { @@ -428,6 +446,15 @@ run "size_large_profile_explicit" { condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "24Gi" && output.dev_resources.limits["ephemeral-storage"] == "50Gi" error_message = "explicit large profile limits mismatch (EARS-9)" } + # EARS-10: dind resources must be identical across all sizes. 
+ assert { + condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" + error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" + } + assert { + condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" + error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" + } } run "size_invalid_fails_precondition" { @@ -455,42 +482,3 @@ run "size_invalid_fails_precondition" { expect_failures = [resource.coder_agent.dev] } -run "dind_resources_constant_across_sizes" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000014" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"small\"}" - } - } - - # Under all three sizes the dind sidecar profile MUST be identical. We sample - # one size here; the other two sizes' size_*_profile runs implicitly cover - # the invariant by having the same dind assertion pass. 
(EARS-10) - assert { - condition = output.dind_resources.requests.cpu == "250m" - error_message = "dind requests.cpu must be 250m across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.requests.memory == "1Gi" - error_message = "dind requests.memory must be 1Gi across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.limits.cpu == "2" - error_message = "dind limits.cpu must be 2 across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.limits.memory == "4Gi" - error_message = "dind limits.memory must be 4Gi across all sizes (EARS-10)" - } -} From 18f0840cd0a753975390f2974811a882adc6cb01 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:14:35 -0700 Subject: [PATCH 13/25] test(terraform): add failing docker-sidecar-gating tests Co-Authored-By: Claude Sonnet 4.6 --- terraform/modules/workspace-pod/outputs.tf | 15 ++++ terraform/outputs.tf | 4 + terraform/tests/task-metadata.tftest.hcl | 91 ++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 terraform/modules/workspace-pod/outputs.tf diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf new file mode 100644 index 0000000..7222fb3 --- /dev/null +++ b/terraform/modules/workspace-pod/outputs.tf @@ -0,0 +1,15 @@ +output "pod_name" { + description = "Name of the created pod or deployment" + value = var.deployment_type == "deployment" ? ( + length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].metadata[0].name : "" + ) : ( + length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].metadata[0].name : "" + ) +} + +# Test-introspection outputs. Internal contract — consumed only by +# terraform/tests/*.tftest.hcl, not by the root module's production path. 
+ +output "docker_enabled" { + value = var.docker_enabled +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf index 7764cb5..3b2dbf2 100644 --- a/terraform/outputs.tf +++ b/terraform/outputs.tf @@ -24,3 +24,7 @@ output "dev_resources" { output "dind_resources" { value = try(local.dind_resources, null) } + +output "docker_enabled" { + value = try(module.workspace.docker_enabled, null) +} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index d87e63b..dc02271 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -482,3 +482,94 @@ run "size_invalid_fails_precondition" { expect_failures = [resource.coder_agent.dev] } +# ─── Docker sidecar gating ────────────────────────────────────────────────── + +run "docker_false_by_default" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000015" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" + } + } + + # EARS-6 default-docker behavior: absent => false => dind container omitted + assert { + condition = output.docker_enabled == false + error_message = "docker must default to false when absent, and the workspace-pod module must receive docker_enabled=false (EARS-11)" + } + assert { + condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dind"]) == 0 + error_message = "dind container must not be rendered when docker=false (EARS-11)" + } +} + +run "docker_true_enables_sidecar" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000016" + access_url = "https://example.test" + } + } + override_data { + target = 
data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true}" + } + } + + assert { + condition = output.docker_enabled == true + error_message = "docker=true must propagate to workspace-pod.docker_enabled (EARS-12)" + } + assert { + condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dind"]) == 1 + error_message = "dind container must be rendered exactly once when docker=true (EARS-12)" + } + assert { + condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 1 + error_message = "DOCKER_HOST env must be present on dev container when docker=true (EARS-12)" + } +} + +run "docker_false_sets_no_docker_host" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000018" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":false}" + } + } + + # EARS-11: when docker=false, DOCKER_HOST must NOT appear on the dev container + assert { + condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 0 + error_message = "DOCKER_HOST env must not be set on dev container when docker=false (EARS-11)" + } +} + From 479938b4b6b5799f65f150330592315fecf02381 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:26:49 -0700 Subject: [PATCH 14/25] feat(terraform): gate dind sidecar and DOCKER_HOST env on docker_enabled Declares var.docker_enabled (default false) in the workspace-pod module, wraps the 
DOCKER_HOST env and dind container in dynamic blocks gated on that variable, and moves the kubernetes_pod_v1/kubernetes_deployment_v1 resources to root scope so terraform test assertions can reference them directly. Threads docker_enabled = local.docker through the module call so the TaskMetadata docker field controls sidecar presence end-to-end. Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 1 + terraform/modules/workspace-pod/main.tf | 130 ++++++ terraform/modules/workspace-pod/outputs.tf | 64 ++- terraform/modules/workspace-pod/variables.tf | 162 ++++++++ terraform/workspace.tf | 396 +++++++++++++++++++ 5 files changed, 744 insertions(+), 9 deletions(-) create mode 100644 terraform/modules/workspace-pod/main.tf create mode 100644 terraform/modules/workspace-pod/variables.tf create mode 100644 terraform/workspace.tf diff --git a/terraform/main.tf b/terraform/main.tf index 501d8e0..78b19e5 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -292,6 +292,7 @@ module "workspace" { restart_policy = "Never" termination_grace_period_seconds = 30 do_not_disrupt = true + docker_enabled = local.docker git_url = local.git_url workspace_size = "30Gi" app_name = "coder-task" diff --git a/terraform/modules/workspace-pod/main.tf b/terraform/modules/workspace-pod/main.tf new file mode 100644 index 0000000..d8988ac --- /dev/null +++ b/terraform/modules/workspace-pod/main.tf @@ -0,0 +1,130 @@ +terraform { + required_providers { + kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } + } +} + +# ─── Locals ────────────────────────────────────────────────────────────────── + +locals { + hostname = lower(var.workspace_name) + ws_id = substr(var.workspace_id, 0, 8) + slug = "${local.hostname}-${local.ws_id}" + + labels = { + "app.kubernetes.io/name" = var.app_name + "app.kubernetes.io/instance" = local.hostname + "app.kubernetes.io/managed-by" = "coder" + } + + annotations = { + "coder.com/owner-name" = var.owner_name + "coder.com/owner-email" = 
var.owner_email + "coder.com/owner" = var.owner_username + "coder.com/workspace" = var.workspace_name + } + + # Standard setup script + optional caller additions + setup_script = join("; ", compact([ + "chown -R 1000:1000 /workspaces /persist/agent-state", + var.setup_script, + ])) + + # Init script: drop to UID 1000 if root, then run agent init + init_script = join(" && ", [ + "echo ${base64encode(var.agent_init_script)} | base64 -d > /tmp/init.sh", + "chmod +x /tmp/init.sh", + "if [ \"$(id -u)\" = \"0\" ]; then export HOME=$(getent passwd 1000 | cut -d: -f6); exec setpriv --reuid=1000 --regid=1000 --init-groups /bin/bash /tmp/init.sh; else exec /bin/bash /tmp/init.sh; fi", + ]) + + # Filter volumes by count > 0 + active_volumes = [for v in var.volumes : v if v.count > 0] + + # Volumes for dev container (all standard + volumes with containers "dev" or "both") + dev_extra_mounts = [for v in local.active_volumes : v if contains(["dev", "both"], v.containers)] + dind_extra_mounts = [for v in local.active_volumes : v if contains(["dind", "both"], v.containers)] + + pod_annotations = var.do_not_disrupt ? 
merge(local.annotations, { + "karpenter.sh/do-not-disrupt" = "true" + }) : local.annotations +} + +# ─── Standard PVCs ─────────────────────────────────────────────────────────── + +resource "kubernetes_persistent_volume_claim_v1" "workspace" { + metadata { + name = "${var.name_prefix}-workspace-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = var.workspace_size + } + } + } + + wait_until_bound = false + + lifecycle { + ignore_changes = [spec[0].resources[0].requests] + } +} + +resource "kubernetes_persistent_volume_claim_v1" "agent_state" { + metadata { + name = "${var.name_prefix}-agent-state-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = "1Gi" + } + } + } + + wait_until_bound = false + + lifecycle { + ignore_changes = [spec[0].resources[0].requests] + } +} + +# ─── Extra PVCs (persistent volumes from var.volumes) ──────────────────────── + +resource "kubernetes_persistent_volume_claim_v1" "extra" { + for_each = { for v in local.active_volumes : v.name => v if v.persistent } + + metadata { + name = "${each.key}-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = each.value.size + } + } + } + + wait_until_bound = false +} diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf index 7222fb3..d2ed56d 100644 --- a/terraform/modules/workspace-pod/outputs.tf +++ b/terraform/modules/workspace-pod/outputs.tf @@ -1,15 +1,61 @@ output "pod_name" { - description = "Name of the created pod or deployment" - value = var.deployment_type == 
"deployment" ? ( - length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].metadata[0].name : "" - ) : ( - length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].metadata[0].name : "" - ) + description = "Computed name of the pod or deployment (used by callers and coder_metadata)" + value = "${var.name_prefix}-${local.slug}" } -# Test-introspection outputs. Internal contract — consumed only by -# terraform/tests/*.tftest.hcl, not by the root module's production path. - output "docker_enabled" { value = var.docker_enabled } + +# ─── Outputs consumed by root-level kubernetes resources ───────────────────── +# These expose internal computed values so the root module can create the +# kubernetes_pod_v1 / kubernetes_deployment_v1 resources at root scope, which +# allows terraform test assertions to reference them without module prefix. + +output "labels" { + value = local.labels +} + +output "pod_annotations" { + value = local.pod_annotations +} + +output "workspace_pvc_name" { + value = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name +} + +output "agent_state_pvc_name" { + value = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name +} + +output "extra_pvc_names" { + description = "Map of extra volume name => PVC claim name for persistent extra volumes" + value = { for k, v in kubernetes_persistent_volume_claim_v1.extra : k => v.metadata[0].name } +} + +output "init_script" { + value = local.init_script +} + +output "setup_script" { + value = local.setup_script +} + +output "dev_extra_mounts" { + description = "Active volume entries destined for the dev container" + value = local.dev_extra_mounts +} + +output "dind_extra_mounts" { + description = "Active volume entries destined for the dind sidecar" + value = local.dind_extra_mounts +} + +output "active_volumes" { + description = "All active (count > 0) volume entries, for declaring kubernetes Volume objects" + value = local.active_volumes 
+} + +output "deployment_type" { + value = var.deployment_type +} diff --git a/terraform/modules/workspace-pod/variables.tf b/terraform/modules/workspace-pod/variables.tf new file mode 100644 index 0000000..7b14ae5 --- /dev/null +++ b/terraform/modules/workspace-pod/variables.tf @@ -0,0 +1,162 @@ +# ─── Identity ──────────────────────────────────────────────────────────────── + +variable "workspace_name" { + type = string + description = "Coder workspace name" +} + +variable "workspace_id" { + type = string + description = "Coder workspace ID" +} + +variable "start_count" { + type = number + description = "data.coder_workspace.me.start_count — controls whether pod/deployment is created" +} + +variable "owner_name" { + type = string + description = "Workspace owner full name" +} + +variable "owner_email" { + type = string + description = "Workspace owner email" +} + +variable "owner_username" { + type = string + description = "Workspace owner username" +} + +# ─── Agent ─────────────────────────────────────────────────────────────────── + +variable "agent_token" { + type = string + sensitive = true + description = "Coder agent token" +} + +variable "agent_init_script" { + type = string + description = "Coder agent init_script (base64-encoded and executed via setpriv)" + default = "" +} + +variable "access_url" { + type = string + description = "Coder access URL" +} + +# ─── Workload type ─────────────────────────────────────────────────────────── + +variable "deployment_type" { + type = string + description = "Kubernetes workload type: 'deployment' or 'pod'" + default = "deployment" + + validation { + condition = contains(["deployment", "pod"], var.deployment_type) + error_message = "deployment_type must be 'deployment' or 'pod'" + } +} + +variable "restart_policy" { + type = string + description = "Pod restart policy (only used when deployment_type = 'pod')" + default = "Always" +} + +variable "termination_grace_period_seconds" { + type = number + description = 
"Termination grace period (only used when deployment_type = 'pod')" + default = 30 +} + +variable "do_not_disrupt" { + type = bool + description = "Add karpenter.sh/do-not-disrupt annotation" + default = false +} + +variable "docker_enabled" { + type = bool + description = "When false, the dind sidecar, DOCKER_HOST env on dev, and any containers=\"dind\" or \"both\" volume mounts are omitted from the rendered pod spec." + default = false +} + +# ─── Git / envbuilder ──────────────────────────────────────────────────────── + +variable "git_url" { + type = string + description = "Git URL for envbuilder (may include #refs/heads/branch suffix)" +} + +variable "setup_script" { + type = string + description = "Additional setup script lines appended after the standard chown" + default = "" +} + +# ─── Resource profiles ─────────────────────────────────────────────────────── + +variable "dev_resources" { + type = object({ + requests = map(string) + limits = map(string) + }) + description = "Resource requests/limits for the dev (envbuilder) container" + default = { + requests = { cpu = "500m", memory = "8Gi", "ephemeral-storage" = "1Gi" } + limits = { cpu = "16", memory = "32Gi", "ephemeral-storage" = "10Gi" } + } +} + +variable "dind_resources" { + type = object({ + requests = map(string) + limits = map(string) + }) + description = "Resource requests/limits for the dind sidecar" + default = { + requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "1Gi" } + limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "10Gi" } + } +} + +# ─── Storage ───────────────────────────────────────────────────────────────── + +variable "workspace_size" { + type = string + description = "Size of the workspace PVC (e.g. 
'10Gi', '30Gi')" + default = "10Gi" +} + +variable "volumes" { + type = list(object({ + name = string + size = string + mount_path = string + persistent = optional(bool, true) + count = optional(number, 1) + # Which containers get this mount: "dev", "dind", or "both" + containers = optional(string, "dev") + })) + description = "Additional volumes beyond workspace and agent-state" + default = [] +} + +# ─── Labels ────────────────────────────────────────────────────────────────── + +variable "app_name" { + type = string + description = "Value for app.kubernetes.io/name label" + default = "coder-workspace" +} + +variable "name_prefix" { + type = string + description = "Prefix for Kubernetes resource names" + default = "workspace" +} diff --git a/terraform/workspace.tf b/terraform/workspace.tf new file mode 100644 index 0000000..16d3422 --- /dev/null +++ b/terraform/workspace.tf @@ -0,0 +1,396 @@ +# ─── Workspace Pod / Deployment ────────────────────────────────────────────── +# Declared at root scope so that terraform test assertions can reference +# kubernetes_pod_v1.workspace and kubernetes_deployment_v1.workspace directly +# without a module prefix. The workspace-pod module owns PVCs and computes +# labels/scripts; these resources consume its outputs. + +# ─── Deployment (long-lived workspaces) ────────────────────────────────────── + +resource "kubernetes_deployment_v1" "workspace" { + count = module.workspace.deployment_type == "deployment" ? 
data.coder_workspace.me.start_count : 0 + + metadata { + name = module.workspace.pod_name + namespace = "coder" + labels = module.workspace.labels + annotations = module.workspace.labels + } + + timeouts { + create = "15m" + } + + wait_for_rollout = true + + spec { + replicas = 1 + + strategy { + type = "Recreate" + } + + selector { + match_labels = module.workspace.labels + } + + template { + metadata { + labels = module.workspace.labels + annotations = module.workspace.pod_annotations + } + + spec { + affinity { + node_affinity { + preferred_during_scheduling_ignored_during_execution { + weight = 100 + preference { + match_expressions { + key = "role" + operator = "In" + values = ["workspace"] + } + } + } + } + } + + # ── Dev container ────────────────────────────────────────────── + container { + name = "dev" + image = "ghcr.io/coder/envbuilder:1.3.0" + + env { + name = "ENVBUILDER_GIT_URL" + value = local.git_url + } + env { + name = "ENVBUILDER_SKIP_REBUILD" + value = "false" + } + env { + name = "ENVBUILDER_FALLBACK_IMAGE" + value = "codercom/enterprise-base:ubuntu" + } + env { + name = "ENVBUILDER_SETUP_SCRIPT" + value = module.workspace.setup_script + } + env { + name = "ENVBUILDER_INIT_SCRIPT" + value = module.workspace.init_script + } + env { + name = "CODER_AGENT_TOKEN" + value = try(coder_agent.dev[0].token, "") + } + env { + name = "CODER_AGENT_URL" + value = data.coder_workspace.me.access_url + } + dynamic "env" { + for_each = local.docker ? 
[1] : [] + content { + name = "DOCKER_HOST" + value = "tcp://localhost:2375" + } + } + env { + name = "ENVBUILDER_CACHE_REPO" + value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" + } + env { + name = "ENVBUILDER_INSECURE" + value = "true" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + volume_mount { + name = "agent-state" + mount_path = "/persist/agent-state" + } + + dynamic "volume_mount" { + for_each = module.workspace.dev_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = local.dev_resources.requests + limits = local.dev_resources.limits + } + } + + # ── DinD sidecar ─────────────────────────────────────────────── + dynamic "container" { + for_each = local.docker ? [1] : [] + content { + name = "dind" + image = "docker:27-dind" + + security_context { + privileged = true + } + + env { + name = "DOCKER_TLS_CERTDIR" + value = "" + } + + port { + container_port = 2375 + protocol = "TCP" + } + + # Always mount workspace for docker-compose bind mounts + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + dynamic "volume_mount" { + for_each = module.workspace.dind_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = local.dind_resources.requests + limits = local.dind_resources.limits + } + } + } + + # ── Standard volumes ───────────────────────────────────────── + volume { + name = "workspace" + persistent_volume_claim { + claim_name = module.workspace.workspace_pvc_name + } + } + + volume { + name = "agent-state" + persistent_volume_claim { + claim_name = module.workspace.agent_state_pvc_name + } + } + + # ── Extra persistent volumes ───────────────────────────────── + dynamic "volume" { + for_each = { for v in module.workspace.active_volumes : v.name => v if v.persistent } + content { + name = volume.key + 
persistent_volume_claim { + claim_name = module.workspace.extra_pvc_names[volume.key] + } + } + } + + # ── Extra ephemeral volumes ────────────────────────────────── + dynamic "volume" { + for_each = { for v in module.workspace.active_volumes : v.name => v if !v.persistent } + content { + name = volume.key + empty_dir { + size_limit = volume.value.size + } + } + } + } + } + } +} + +# ─── Pod (ephemeral tasks) ─────────────────────────────────────────────────── + +resource "kubernetes_pod_v1" "workspace" { + count = module.workspace.deployment_type == "pod" ? data.coder_workspace.me.start_count : 0 + + metadata { + name = module.workspace.pod_name + namespace = "coder" + labels = module.workspace.labels + annotations = module.workspace.pod_annotations + } + + spec { + affinity { + node_affinity { + preferred_during_scheduling_ignored_during_execution { + weight = 100 + preference { + match_expressions { + key = "role" + operator = "In" + values = ["workspace"] + } + } + } + } + } + + restart_policy = "Never" + termination_grace_period_seconds = 30 + + # ── Dev container ────────────────────────────────────────────── + container { + name = "dev" + image = "ghcr.io/coder/envbuilder:1.3.0" + + env { + name = "ENVBUILDER_GIT_URL" + value = local.git_url + } + env { + name = "ENVBUILDER_SKIP_REBUILD" + value = "false" + } + env { + name = "ENVBUILDER_FALLBACK_IMAGE" + value = "codercom/enterprise-base:ubuntu" + } + env { + name = "ENVBUILDER_SETUP_SCRIPT" + value = module.workspace.setup_script + } + env { + name = "ENVBUILDER_INIT_SCRIPT" + value = module.workspace.init_script + } + env { + name = "CODER_AGENT_TOKEN" + value = try(coder_agent.dev[0].token, "") + } + env { + name = "CODER_AGENT_URL" + value = data.coder_workspace.me.access_url + } + dynamic "env" { + for_each = local.docker ? 
[1] : [] + content { + name = "DOCKER_HOST" + value = "tcp://localhost:2375" + } + } + env { + name = "ENVBUILDER_CACHE_REPO" + value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" + } + env { + name = "ENVBUILDER_INSECURE" + value = "true" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + volume_mount { + name = "agent-state" + mount_path = "/persist/agent-state" + } + + dynamic "volume_mount" { + for_each = module.workspace.dev_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = local.dev_resources.requests + limits = local.dev_resources.limits + } + } + + # ── DinD sidecar ─────────────────────────────────────────────── + dynamic "container" { + for_each = local.docker ? [1] : [] + content { + name = "dind" + image = "docker:27-dind" + + security_context { + privileged = true + } + + env { + name = "DOCKER_TLS_CERTDIR" + value = "" + } + + port { + container_port = 2375 + protocol = "TCP" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + dynamic "volume_mount" { + for_each = module.workspace.dind_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = local.dind_resources.requests + limits = local.dind_resources.limits + } + } + } + + # ── Standard volumes ───────────────────────────────────────── + volume { + name = "workspace" + persistent_volume_claim { + claim_name = module.workspace.workspace_pvc_name + } + } + + volume { + name = "agent-state" + persistent_volume_claim { + claim_name = module.workspace.agent_state_pvc_name + } + } + + # ── Extra persistent volumes ───────────────────────────────── + dynamic "volume" { + for_each = { for v in module.workspace.active_volumes : v.name => v if v.persistent } + content { + name = volume.key + persistent_volume_claim { + claim_name = 
module.workspace.extra_pvc_names[volume.key] + } + } + } + + # ── Extra ephemeral volumes ────────────────────────────────── + dynamic "volume" { + for_each = { for v in module.workspace.active_volumes : v.name => v if !v.persistent } + content { + name = volume.key + empty_dir { + size_limit = volume.value.size + } + } + } + } +} From 498039869c3d14fed4da3f8864c35fbc991d9645 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:28:59 -0700 Subject: [PATCH 15/25] Revert "feat(terraform): gate dind sidecar and DOCKER_HOST env on docker_enabled" This reverts commit 6870e616c17a5197ecd9aa5e0f845aa8ae14039f. --- terraform/main.tf | 1 - terraform/modules/workspace-pod/main.tf | 130 ------ terraform/modules/workspace-pod/outputs.tf | 64 +-- terraform/modules/workspace-pod/variables.tf | 162 -------- terraform/workspace.tf | 396 ------------------- 5 files changed, 9 insertions(+), 744 deletions(-) delete mode 100644 terraform/modules/workspace-pod/main.tf delete mode 100644 terraform/modules/workspace-pod/variables.tf delete mode 100644 terraform/workspace.tf diff --git a/terraform/main.tf b/terraform/main.tf index 78b19e5..501d8e0 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -292,7 +292,6 @@ module "workspace" { restart_policy = "Never" termination_grace_period_seconds = 30 do_not_disrupt = true - docker_enabled = local.docker git_url = local.git_url workspace_size = "30Gi" app_name = "coder-task" diff --git a/terraform/modules/workspace-pod/main.tf b/terraform/modules/workspace-pod/main.tf deleted file mode 100644 index d8988ac..0000000 --- a/terraform/modules/workspace-pod/main.tf +++ /dev/null @@ -1,130 +0,0 @@ -terraform { - required_providers { - kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } - } -} - -# ─── Locals ────────────────────────────────────────────────────────────────── - -locals { - hostname = lower(var.workspace_name) - ws_id = substr(var.workspace_id, 0, 
8) - slug = "${local.hostname}-${local.ws_id}" - - labels = { - "app.kubernetes.io/name" = var.app_name - "app.kubernetes.io/instance" = local.hostname - "app.kubernetes.io/managed-by" = "coder" - } - - annotations = { - "coder.com/owner-name" = var.owner_name - "coder.com/owner-email" = var.owner_email - "coder.com/owner" = var.owner_username - "coder.com/workspace" = var.workspace_name - } - - # Standard setup script + optional caller additions - setup_script = join("; ", compact([ - "chown -R 1000:1000 /workspaces /persist/agent-state", - var.setup_script, - ])) - - # Init script: drop to UID 1000 if root, then run agent init - init_script = join(" && ", [ - "echo ${base64encode(var.agent_init_script)} | base64 -d > /tmp/init.sh", - "chmod +x /tmp/init.sh", - "if [ \"$(id -u)\" = \"0\" ]; then export HOME=$(getent passwd 1000 | cut -d: -f6); exec setpriv --reuid=1000 --regid=1000 --init-groups /bin/bash /tmp/init.sh; else exec /bin/bash /tmp/init.sh; fi", - ]) - - # Filter volumes by count > 0 - active_volumes = [for v in var.volumes : v if v.count > 0] - - # Volumes for dev container (all standard + volumes with containers "dev" or "both") - dev_extra_mounts = [for v in local.active_volumes : v if contains(["dev", "both"], v.containers)] - dind_extra_mounts = [for v in local.active_volumes : v if contains(["dind", "both"], v.containers)] - - pod_annotations = var.do_not_disrupt ? 
merge(local.annotations, { - "karpenter.sh/do-not-disrupt" = "true" - }) : local.annotations -} - -# ─── Standard PVCs ─────────────────────────────────────────────────────────── - -resource "kubernetes_persistent_volume_claim_v1" "workspace" { - metadata { - name = "${var.name_prefix}-workspace-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = var.workspace_size - } - } - } - - wait_until_bound = false - - lifecycle { - ignore_changes = [spec[0].resources[0].requests] - } -} - -resource "kubernetes_persistent_volume_claim_v1" "agent_state" { - metadata { - name = "${var.name_prefix}-agent-state-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = "1Gi" - } - } - } - - wait_until_bound = false - - lifecycle { - ignore_changes = [spec[0].resources[0].requests] - } -} - -# ─── Extra PVCs (persistent volumes from var.volumes) ──────────────────────── - -resource "kubernetes_persistent_volume_claim_v1" "extra" { - for_each = { for v in local.active_volumes : v.name => v if v.persistent } - - metadata { - name = "${each.key}-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = each.value.size - } - } - } - - wait_until_bound = false -} diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf index d2ed56d..7222fb3 100644 --- a/terraform/modules/workspace-pod/outputs.tf +++ b/terraform/modules/workspace-pod/outputs.tf @@ -1,61 +1,15 @@ output "pod_name" { - description = "Computed name of the pod or deployment (used by callers and 
coder_metadata)" - value = "${var.name_prefix}-${local.slug}" + description = "Name of the created pod or deployment" + value = var.deployment_type == "deployment" ? ( + length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].metadata[0].name : "" + ) : ( + length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].metadata[0].name : "" + ) } +# Test-introspection outputs. Internal contract — consumed only by +# terraform/tests/*.tftest.hcl, not by the root module's production path. + output "docker_enabled" { value = var.docker_enabled } - -# ─── Outputs consumed by root-level kubernetes resources ───────────────────── -# These expose internal computed values so the root module can create the -# kubernetes_pod_v1 / kubernetes_deployment_v1 resources at root scope, which -# allows terraform test assertions to reference them without module prefix. - -output "labels" { - value = local.labels -} - -output "pod_annotations" { - value = local.pod_annotations -} - -output "workspace_pvc_name" { - value = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name -} - -output "agent_state_pvc_name" { - value = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name -} - -output "extra_pvc_names" { - description = "Map of extra volume name => PVC claim name for persistent extra volumes" - value = { for k, v in kubernetes_persistent_volume_claim_v1.extra : k => v.metadata[0].name } -} - -output "init_script" { - value = local.init_script -} - -output "setup_script" { - value = local.setup_script -} - -output "dev_extra_mounts" { - description = "Active volume entries destined for the dev container" - value = local.dev_extra_mounts -} - -output "dind_extra_mounts" { - description = "Active volume entries destined for the dind sidecar" - value = local.dind_extra_mounts -} - -output "active_volumes" { - description = "All active (count > 0) volume entries, for declaring kubernetes Volume objects" - value = 
local.active_volumes -} - -output "deployment_type" { - value = var.deployment_type -} diff --git a/terraform/modules/workspace-pod/variables.tf b/terraform/modules/workspace-pod/variables.tf deleted file mode 100644 index 7b14ae5..0000000 --- a/terraform/modules/workspace-pod/variables.tf +++ /dev/null @@ -1,162 +0,0 @@ -# ─── Identity ──────────────────────────────────────────────────────────────── - -variable "workspace_name" { - type = string - description = "Coder workspace name" -} - -variable "workspace_id" { - type = string - description = "Coder workspace ID" -} - -variable "start_count" { - type = number - description = "data.coder_workspace.me.start_count — controls whether pod/deployment is created" -} - -variable "owner_name" { - type = string - description = "Workspace owner full name" -} - -variable "owner_email" { - type = string - description = "Workspace owner email" -} - -variable "owner_username" { - type = string - description = "Workspace owner username" -} - -# ─── Agent ─────────────────────────────────────────────────────────────────── - -variable "agent_token" { - type = string - sensitive = true - description = "Coder agent token" -} - -variable "agent_init_script" { - type = string - description = "Coder agent init_script (base64-encoded and executed via setpriv)" - default = "" -} - -variable "access_url" { - type = string - description = "Coder access URL" -} - -# ─── Workload type ─────────────────────────────────────────────────────────── - -variable "deployment_type" { - type = string - description = "Kubernetes workload type: 'deployment' or 'pod'" - default = "deployment" - - validation { - condition = contains(["deployment", "pod"], var.deployment_type) - error_message = "deployment_type must be 'deployment' or 'pod'" - } -} - -variable "restart_policy" { - type = string - description = "Pod restart policy (only used when deployment_type = 'pod')" - default = "Always" -} - -variable "termination_grace_period_seconds" { - type = 
number - description = "Termination grace period (only used when deployment_type = 'pod')" - default = 30 -} - -variable "do_not_disrupt" { - type = bool - description = "Add karpenter.sh/do-not-disrupt annotation" - default = false -} - -variable "docker_enabled" { - type = bool - description = "When false, the dind sidecar, DOCKER_HOST env on dev, and any containers=\"dind\" or \"both\" volume mounts are omitted from the rendered pod spec." - default = false -} - -# ─── Git / envbuilder ──────────────────────────────────────────────────────── - -variable "git_url" { - type = string - description = "Git URL for envbuilder (may include #refs/heads/branch suffix)" -} - -variable "setup_script" { - type = string - description = "Additional setup script lines appended after the standard chown" - default = "" -} - -# ─── Resource profiles ─────────────────────────────────────────────────────── - -variable "dev_resources" { - type = object({ - requests = map(string) - limits = map(string) - }) - description = "Resource requests/limits for the dev (envbuilder) container" - default = { - requests = { cpu = "500m", memory = "8Gi", "ephemeral-storage" = "1Gi" } - limits = { cpu = "16", memory = "32Gi", "ephemeral-storage" = "10Gi" } - } -} - -variable "dind_resources" { - type = object({ - requests = map(string) - limits = map(string) - }) - description = "Resource requests/limits for the dind sidecar" - default = { - requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "1Gi" } - limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "10Gi" } - } -} - -# ─── Storage ───────────────────────────────────────────────────────────────── - -variable "workspace_size" { - type = string - description = "Size of the workspace PVC (e.g. 
'10Gi', '30Gi')" - default = "10Gi" -} - -variable "volumes" { - type = list(object({ - name = string - size = string - mount_path = string - persistent = optional(bool, true) - count = optional(number, 1) - # Which containers get this mount: "dev", "dind", or "both" - containers = optional(string, "dev") - })) - description = "Additional volumes beyond workspace and agent-state" - default = [] -} - -# ─── Labels ────────────────────────────────────────────────────────────────── - -variable "app_name" { - type = string - description = "Value for app.kubernetes.io/name label" - default = "coder-workspace" -} - -variable "name_prefix" { - type = string - description = "Prefix for Kubernetes resource names" - default = "workspace" -} diff --git a/terraform/workspace.tf b/terraform/workspace.tf deleted file mode 100644 index 16d3422..0000000 --- a/terraform/workspace.tf +++ /dev/null @@ -1,396 +0,0 @@ -# ─── Workspace Pod / Deployment ────────────────────────────────────────────── -# Declared at root scope so that terraform test assertions can reference -# kubernetes_pod_v1.workspace and kubernetes_deployment_v1.workspace directly -# without a module prefix. The workspace-pod module owns PVCs and computes -# labels/scripts; these resources consume its outputs. - -# ─── Deployment (long-lived workspaces) ────────────────────────────────────── - -resource "kubernetes_deployment_v1" "workspace" { - count = module.workspace.deployment_type == "deployment" ? 
data.coder_workspace.me.start_count : 0 - - metadata { - name = module.workspace.pod_name - namespace = "coder" - labels = module.workspace.labels - annotations = module.workspace.labels - } - - timeouts { - create = "15m" - } - - wait_for_rollout = true - - spec { - replicas = 1 - - strategy { - type = "Recreate" - } - - selector { - match_labels = module.workspace.labels - } - - template { - metadata { - labels = module.workspace.labels - annotations = module.workspace.pod_annotations - } - - spec { - affinity { - node_affinity { - preferred_during_scheduling_ignored_during_execution { - weight = 100 - preference { - match_expressions { - key = "role" - operator = "In" - values = ["workspace"] - } - } - } - } - } - - # ── Dev container ────────────────────────────────────────────── - container { - name = "dev" - image = "ghcr.io/coder/envbuilder:1.3.0" - - env { - name = "ENVBUILDER_GIT_URL" - value = local.git_url - } - env { - name = "ENVBUILDER_SKIP_REBUILD" - value = "false" - } - env { - name = "ENVBUILDER_FALLBACK_IMAGE" - value = "codercom/enterprise-base:ubuntu" - } - env { - name = "ENVBUILDER_SETUP_SCRIPT" - value = module.workspace.setup_script - } - env { - name = "ENVBUILDER_INIT_SCRIPT" - value = module.workspace.init_script - } - env { - name = "CODER_AGENT_TOKEN" - value = try(coder_agent.dev[0].token, "") - } - env { - name = "CODER_AGENT_URL" - value = data.coder_workspace.me.access_url - } - dynamic "env" { - for_each = local.docker ? 
[1] : [] - content { - name = "DOCKER_HOST" - value = "tcp://localhost:2375" - } - } - env { - name = "ENVBUILDER_CACHE_REPO" - value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" - } - env { - name = "ENVBUILDER_INSECURE" - value = "true" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - volume_mount { - name = "agent-state" - mount_path = "/persist/agent-state" - } - - dynamic "volume_mount" { - for_each = module.workspace.dev_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = local.dev_resources.requests - limits = local.dev_resources.limits - } - } - - # ── DinD sidecar ─────────────────────────────────────────────── - dynamic "container" { - for_each = local.docker ? [1] : [] - content { - name = "dind" - image = "docker:27-dind" - - security_context { - privileged = true - } - - env { - name = "DOCKER_TLS_CERTDIR" - value = "" - } - - port { - container_port = 2375 - protocol = "TCP" - } - - # Always mount workspace for docker-compose bind mounts - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - dynamic "volume_mount" { - for_each = module.workspace.dind_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = local.dind_resources.requests - limits = local.dind_resources.limits - } - } - } - - # ── Standard volumes ───────────────────────────────────────── - volume { - name = "workspace" - persistent_volume_claim { - claim_name = module.workspace.workspace_pvc_name - } - } - - volume { - name = "agent-state" - persistent_volume_claim { - claim_name = module.workspace.agent_state_pvc_name - } - } - - # ── Extra persistent volumes ───────────────────────────────── - dynamic "volume" { - for_each = { for v in module.workspace.active_volumes : v.name => v if v.persistent } - content { - name = volume.key - 
persistent_volume_claim { - claim_name = module.workspace.extra_pvc_names[volume.key] - } - } - } - - # ── Extra ephemeral volumes ────────────────────────────────── - dynamic "volume" { - for_each = { for v in module.workspace.active_volumes : v.name => v if !v.persistent } - content { - name = volume.key - empty_dir { - size_limit = volume.value.size - } - } - } - } - } - } -} - -# ─── Pod (ephemeral tasks) ─────────────────────────────────────────────────── - -resource "kubernetes_pod_v1" "workspace" { - count = module.workspace.deployment_type == "pod" ? data.coder_workspace.me.start_count : 0 - - metadata { - name = module.workspace.pod_name - namespace = "coder" - labels = module.workspace.labels - annotations = module.workspace.pod_annotations - } - - spec { - affinity { - node_affinity { - preferred_during_scheduling_ignored_during_execution { - weight = 100 - preference { - match_expressions { - key = "role" - operator = "In" - values = ["workspace"] - } - } - } - } - } - - restart_policy = "Never" - termination_grace_period_seconds = 30 - - # ── Dev container ────────────────────────────────────────────── - container { - name = "dev" - image = "ghcr.io/coder/envbuilder:1.3.0" - - env { - name = "ENVBUILDER_GIT_URL" - value = local.git_url - } - env { - name = "ENVBUILDER_SKIP_REBUILD" - value = "false" - } - env { - name = "ENVBUILDER_FALLBACK_IMAGE" - value = "codercom/enterprise-base:ubuntu" - } - env { - name = "ENVBUILDER_SETUP_SCRIPT" - value = module.workspace.setup_script - } - env { - name = "ENVBUILDER_INIT_SCRIPT" - value = module.workspace.init_script - } - env { - name = "CODER_AGENT_TOKEN" - value = try(coder_agent.dev[0].token, "") - } - env { - name = "CODER_AGENT_URL" - value = data.coder_workspace.me.access_url - } - dynamic "env" { - for_each = local.docker ? 
[1] : [] - content { - name = "DOCKER_HOST" - value = "tcp://localhost:2375" - } - } - env { - name = "ENVBUILDER_CACHE_REPO" - value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" - } - env { - name = "ENVBUILDER_INSECURE" - value = "true" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - volume_mount { - name = "agent-state" - mount_path = "/persist/agent-state" - } - - dynamic "volume_mount" { - for_each = module.workspace.dev_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = local.dev_resources.requests - limits = local.dev_resources.limits - } - } - - # ── DinD sidecar ─────────────────────────────────────────────── - dynamic "container" { - for_each = local.docker ? [1] : [] - content { - name = "dind" - image = "docker:27-dind" - - security_context { - privileged = true - } - - env { - name = "DOCKER_TLS_CERTDIR" - value = "" - } - - port { - container_port = 2375 - protocol = "TCP" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - dynamic "volume_mount" { - for_each = module.workspace.dind_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = local.dind_resources.requests - limits = local.dind_resources.limits - } - } - } - - # ── Standard volumes ───────────────────────────────────────── - volume { - name = "workspace" - persistent_volume_claim { - claim_name = module.workspace.workspace_pvc_name - } - } - - volume { - name = "agent-state" - persistent_volume_claim { - claim_name = module.workspace.agent_state_pvc_name - } - } - - # ── Extra persistent volumes ───────────────────────────────── - dynamic "volume" { - for_each = { for v in module.workspace.active_volumes : v.name => v if v.persistent } - content { - name = volume.key - persistent_volume_claim { - claim_name = 
module.workspace.extra_pvc_names[volume.key] - } - } - } - - # ── Extra ephemeral volumes ────────────────────────────────── - dynamic "volume" { - for_each = { for v in module.workspace.active_volumes : v.name => v if !v.persistent } - content { - name = volume.key - empty_dir { - size_limit = volume.value.size - } - } - } - } -} From cd6f3321f9eb97501827d8621121783e7eb1b821 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:32:31 -0700 Subject: [PATCH 16/25] feat(terraform): gate dind sidecar on docker_enabled via module outputs --- terraform/main.tf | 1 + terraform/modules/workspace-pod/main.tf | 521 +++++++++++++++++++ terraform/modules/workspace-pod/outputs.tf | 11 + terraform/modules/workspace-pod/variables.tf | 162 ++++++ terraform/tests/task-metadata.tftest.hcl | 8 +- 5 files changed, 699 insertions(+), 4 deletions(-) create mode 100644 terraform/modules/workspace-pod/main.tf create mode 100644 terraform/modules/workspace-pod/variables.tf diff --git a/terraform/main.tf b/terraform/main.tf index 501d8e0..78b19e5 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -292,6 +292,7 @@ module "workspace" { restart_policy = "Never" termination_grace_period_seconds = 30 do_not_disrupt = true + docker_enabled = local.docker git_url = local.git_url workspace_size = "30Gi" app_name = "coder-task" diff --git a/terraform/modules/workspace-pod/main.tf b/terraform/modules/workspace-pod/main.tf new file mode 100644 index 0000000..f3f1ec1 --- /dev/null +++ b/terraform/modules/workspace-pod/main.tf @@ -0,0 +1,521 @@ +terraform { + required_providers { + kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } + } +} + +# ─── Locals ────────────────────────────────────────────────────────────────── + +locals { + hostname = lower(var.workspace_name) + ws_id = substr(var.workspace_id, 0, 8) + slug = "${local.hostname}-${local.ws_id}" + + labels = { + "app.kubernetes.io/name" = var.app_name + 
"app.kubernetes.io/instance" = local.hostname + "app.kubernetes.io/managed-by" = "coder" + } + + annotations = { + "coder.com/owner-name" = var.owner_name + "coder.com/owner-email" = var.owner_email + "coder.com/owner" = var.owner_username + "coder.com/workspace" = var.workspace_name + } + + # Standard setup script + optional caller additions + setup_script = join("; ", compact([ + "chown -R 1000:1000 /workspaces /persist/agent-state", + var.setup_script, + ])) + + # Init script: drop to UID 1000 if root, then run agent init + init_script = join(" && ", [ + "echo ${base64encode(var.agent_init_script)} | base64 -d > /tmp/init.sh", + "chmod +x /tmp/init.sh", + "if [ \"$(id -u)\" = \"0\" ]; then export HOME=$(getent passwd 1000 | cut -d: -f6); exec setpriv --reuid=1000 --regid=1000 --init-groups /bin/bash /tmp/init.sh; else exec /bin/bash /tmp/init.sh; fi", + ]) + + # Filter volumes by count > 0 + active_volumes = [for v in var.volumes : v if v.count > 0] + + # Volumes for dev container (all standard + volumes with containers "dev" or "both") + dev_extra_mounts = [for v in local.active_volumes : v if contains(["dev", "both"], v.containers)] + dind_extra_mounts = [for v in local.active_volumes : v if contains(["dind", "both"], v.containers)] + + pod_annotations = var.do_not_disrupt ? 
merge(local.annotations, { + "karpenter.sh/do-not-disrupt" = "true" + }) : local.annotations +} + +# ─── Standard PVCs ─────────────────────────────────────────────────────────── + +resource "kubernetes_persistent_volume_claim_v1" "workspace" { + metadata { + name = "${var.name_prefix}-workspace-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = var.workspace_size + } + } + } + + wait_until_bound = false + + lifecycle { + ignore_changes = [spec[0].resources[0].requests] + } +} + +resource "kubernetes_persistent_volume_claim_v1" "agent_state" { + metadata { + name = "${var.name_prefix}-agent-state-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = "1Gi" + } + } + } + + wait_until_bound = false + + lifecycle { + ignore_changes = [spec[0].resources[0].requests] + } +} + +# ─── Extra PVCs (persistent volumes from var.volumes) ──────────────────────── + +resource "kubernetes_persistent_volume_claim_v1" "extra" { + for_each = { for v in local.active_volumes : v.name => v if v.persistent } + + metadata { + name = "${each.key}-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.annotations + } + + spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "gp3" + + resources { + requests = { + storage = each.value.size + } + } + } + + wait_until_bound = false +} + +# ─── Deployment (long-lived workspaces) ────────────────────────────────────── + +resource "kubernetes_deployment_v1" "workspace" { + count = var.deployment_type == "deployment" ? 
var.start_count : 0
+
+  metadata {
+    name        = "${var.name_prefix}-${local.slug}"
+    namespace   = "coder"
+    labels      = local.labels
+    annotations = local.annotations
+  }
+
+  timeouts {
+    create = "15m"
+  }
+
+  wait_for_rollout = true
+
+  spec {
+    replicas = 1
+
+    strategy {
+      type = "Recreate"
+    }
+
+    selector {
+      match_labels = local.labels
+    }
+
+    template {
+      metadata {
+        labels      = local.labels
+        annotations = local.pod_annotations
+      }
+
+      spec {
+        affinity {
+          node_affinity {
+            preferred_during_scheduling_ignored_during_execution {
+              weight = 100
+              preference {
+                match_expressions {
+                  key      = "role"
+                  operator = "In"
+                  values   = ["workspace"]
+                }
+              }
+            }
+          }
+        }
+
+        # ── Dev container ──────────────────────────────────────────────
+        container {
+          name  = "dev"
+          image = "ghcr.io/coder/envbuilder:1.3.0"
+
+          env {
+            name  = "ENVBUILDER_GIT_URL"
+            value = var.git_url
+          }
+          env {
+            name  = "ENVBUILDER_SKIP_REBUILD"
+            value = "false"
+          }
+          env {
+            name  = "ENVBUILDER_FALLBACK_IMAGE"
+            value = "codercom/enterprise-base:ubuntu"
+          }
+          env {
+            name  = "ENVBUILDER_SETUP_SCRIPT"
+            value = local.setup_script
+          }
+          env {
+            name  = "ENVBUILDER_INIT_SCRIPT"
+            value = local.init_script
+          }
+          env {
+            name  = "CODER_AGENT_TOKEN"
+            value = var.agent_token
+          }
+          env {
+            name  = "CODER_AGENT_URL"
+            value = var.access_url
+          }
+          dynamic "env" {
+            for_each = var.docker_enabled ?
[1] : [] + content { + name = "DOCKER_HOST" + value = "tcp://localhost:2375" + } + } + env { + name = "ENVBUILDER_CACHE_REPO" + value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" + } + env { + name = "ENVBUILDER_INSECURE" + value = "true" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + volume_mount { + name = "agent-state" + mount_path = "/persist/agent-state" + } + + dynamic "volume_mount" { + for_each = local.dev_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = var.dev_resources.requests + limits = var.dev_resources.limits + } + } + + # ── DinD sidecar ─────────────────────────────────────────────── + dynamic "container" { + for_each = var.docker_enabled ? [1] : [] + content { + name = "dind" + image = "docker:27-dind" + + security_context { + privileged = true + } + + env { + name = "DOCKER_TLS_CERTDIR" + value = "" + } + + port { + container_port = 2375 + protocol = "TCP" + } + + # Always mount workspace for docker-compose bind mounts + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + dynamic "volume_mount" { + for_each = local.dind_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = var.dind_resources.requests + limits = var.dind_resources.limits + } + } + } + + # ── Standard volumes ───────────────────────────────────────── + volume { + name = "workspace" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name + } + } + + volume { + name = "agent-state" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name + } + } + + # ── Extra persistent volumes ───────────────────────────────── + dynamic "volume" { + for_each = { for v in local.active_volumes : v.name => v if v.persistent } + content { + name = 
volume.key + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim_v1.extra[volume.key].metadata[0].name + } + } + } + + # ── Extra ephemeral volumes ────────────────────────────────── + dynamic "volume" { + for_each = { for v in local.active_volumes : v.name => v if !v.persistent } + content { + name = volume.key + empty_dir { + size_limit = volume.value.size + } + } + } + } + } + } +} + +# ─── Pod (ephemeral tasks) ─────────────────────────────────────────────────── + +resource "kubernetes_pod_v1" "workspace" { + count = var.deployment_type == "pod" ? var.start_count : 0 + + metadata { + name = "${var.name_prefix}-${local.slug}" + namespace = "coder" + labels = local.labels + annotations = local.pod_annotations + } + + spec { + affinity { + node_affinity { + preferred_during_scheduling_ignored_during_execution { + weight = 100 + preference { + match_expressions { + key = "role" + operator = "In" + values = ["workspace"] + } + } + } + } + } + + restart_policy = var.restart_policy + termination_grace_period_seconds = var.termination_grace_period_seconds + + # ── Dev container ────────────────────────────────────────────── + container { + name = "dev" + image = "ghcr.io/coder/envbuilder:1.3.0" + + env { + name = "ENVBUILDER_GIT_URL" + value = var.git_url + } + env { + name = "ENVBUILDER_SKIP_REBUILD" + value = "false" + } + env { + name = "ENVBUILDER_FALLBACK_IMAGE" + value = "codercom/enterprise-base:ubuntu" + } + env { + name = "ENVBUILDER_SETUP_SCRIPT" + value = local.setup_script + } + env { + name = "ENVBUILDER_INIT_SCRIPT" + value = local.init_script + } + env { + name = "CODER_AGENT_TOKEN" + value = var.agent_token + } + env { + name = "CODER_AGENT_URL" + value = var.access_url + } + dynamic "env" { + for_each = var.docker_enabled ? 
[1] : [] + content { + name = "DOCKER_HOST" + value = "tcp://localhost:2375" + } + } + env { + name = "ENVBUILDER_CACHE_REPO" + value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" + } + env { + name = "ENVBUILDER_INSECURE" + value = "true" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + volume_mount { + name = "agent-state" + mount_path = "/persist/agent-state" + } + + dynamic "volume_mount" { + for_each = local.dev_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = var.dev_resources.requests + limits = var.dev_resources.limits + } + } + + # ── DinD sidecar ─────────────────────────────────────────────── + dynamic "container" { + for_each = var.docker_enabled ? [1] : [] + content { + name = "dind" + image = "docker:27-dind" + + security_context { + privileged = true + } + + env { + name = "DOCKER_TLS_CERTDIR" + value = "" + } + + port { + container_port = 2375 + protocol = "TCP" + } + + volume_mount { + name = "workspace" + mount_path = "/workspaces" + } + + dynamic "volume_mount" { + for_each = local.dind_extra_mounts + content { + name = volume_mount.value.name + mount_path = volume_mount.value.mount_path + } + } + + resources { + requests = var.dind_resources.requests + limits = var.dind_resources.limits + } + } + } + + # ── Standard volumes ───────────────────────────────────────── + volume { + name = "workspace" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name + } + } + + volume { + name = "agent-state" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name + } + } + + # ── Extra persistent volumes ───────────────────────────────── + dynamic "volume" { + for_each = { for v in local.active_volumes : v.name => v if v.persistent } + content { + name = volume.key + persistent_volume_claim { + claim_name = 
kubernetes_persistent_volume_claim_v1.extra[volume.key].metadata[0].name + } + } + } + + # ── Extra ephemeral volumes ────────────────────────────────── + dynamic "volume" { + for_each = { for v in local.active_volumes : v.name => v if !v.persistent } + content { + name = volume.key + empty_dir { + size_limit = volume.value.size + } + } + } + } +} diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf index 7222fb3..4152a3d 100644 --- a/terraform/modules/workspace-pod/outputs.tf +++ b/terraform/modules/workspace-pod/outputs.tf @@ -13,3 +13,14 @@ output "pod_name" { output "docker_enabled" { value = var.docker_enabled } + +# Test-introspection output. Exposes the rendered container list from the +# active resource (pod or deployment) so terraform tests can assert on +# container presence and env without addressing module internals directly. +output "pod_containers" { + value = var.deployment_type == "pod" ? ( + length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].spec[0].container : [] + ) : ( + length(kubernetes_deployment_v1.workspace) > 0 ? 
kubernetes_deployment_v1.workspace[0].spec[0].template[0].spec[0].container : [] + ) +} diff --git a/terraform/modules/workspace-pod/variables.tf b/terraform/modules/workspace-pod/variables.tf new file mode 100644 index 0000000..e5e1fec --- /dev/null +++ b/terraform/modules/workspace-pod/variables.tf @@ -0,0 +1,162 @@ +# ─── Identity ──────────────────────────────────────────────────────────────── + +variable "workspace_name" { + type = string + description = "Coder workspace name" +} + +variable "workspace_id" { + type = string + description = "Coder workspace ID" +} + +variable "start_count" { + type = number + description = "data.coder_workspace.me.start_count — controls whether pod/deployment is created" +} + +variable "owner_name" { + type = string + description = "Workspace owner full name" +} + +variable "owner_email" { + type = string + description = "Workspace owner email" +} + +variable "owner_username" { + type = string + description = "Workspace owner username" +} + +# ─── Agent ─────────────────────────────────────────────────────────────────── + +variable "agent_token" { + type = string + sensitive = true + description = "Coder agent token" +} + +variable "agent_init_script" { + type = string + description = "Coder agent init_script (base64-encoded and executed via setpriv)" + default = "" +} + +variable "access_url" { + type = string + description = "Coder access URL" +} + +# ─── Workload type ─────────────────────────────────────────────────────────── + +variable "deployment_type" { + type = string + description = "Kubernetes workload type: 'deployment' or 'pod'" + default = "deployment" + + validation { + condition = contains(["deployment", "pod"], var.deployment_type) + error_message = "deployment_type must be 'deployment' or 'pod'" + } +} + +variable "restart_policy" { + type = string + description = "Pod restart policy (only used when deployment_type = 'pod')" + default = "Always" +} + +variable "termination_grace_period_seconds" { + type = 
number + description = "Termination grace period (only used when deployment_type = 'pod')" + default = 30 +} + +variable "do_not_disrupt" { + type = bool + description = "Add karpenter.sh/do-not-disrupt annotation" + default = false +} + +variable "docker_enabled" { + type = bool + description = "When false, the dind sidecar, DOCKER_HOST env on dev, and any containers=\"dind\" or \"both\" volume mounts are omitted from the rendered pod spec." + default = true +} + +# ─── Git / envbuilder ──────────────────────────────────────────────────────── + +variable "git_url" { + type = string + description = "Git URL for envbuilder (may include #refs/heads/branch suffix)" +} + +variable "setup_script" { + type = string + description = "Additional setup script lines appended after the standard chown" + default = "" +} + +# ─── Resource profiles ─────────────────────────────────────────────────────── + +variable "dev_resources" { + type = object({ + requests = map(string) + limits = map(string) + }) + description = "Resource requests/limits for the dev (envbuilder) container" + default = { + requests = { cpu = "500m", memory = "8Gi", "ephemeral-storage" = "1Gi" } + limits = { cpu = "16", memory = "32Gi", "ephemeral-storage" = "10Gi" } + } +} + +variable "dind_resources" { + type = object({ + requests = map(string) + limits = map(string) + }) + description = "Resource requests/limits for the dind sidecar" + default = { + requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "1Gi" } + limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "10Gi" } + } +} + +# ─── Storage ───────────────────────────────────────────────────────────────── + +variable "workspace_size" { + type = string + description = "Size of the workspace PVC (e.g. 
'10Gi', '30Gi')" + default = "10Gi" +} + +variable "volumes" { + type = list(object({ + name = string + size = string + mount_path = string + persistent = optional(bool, true) + count = optional(number, 1) + # Which containers get this mount: "dev", "dind", or "both" + containers = optional(string, "dev") + })) + description = "Additional volumes beyond workspace and agent-state" + default = [] +} + +# ─── Labels ────────────────────────────────────────────────────────────────── + +variable "app_name" { + type = string + description = "Value for app.kubernetes.io/name label" + default = "coder-workspace" +} + +variable "name_prefix" { + type = string + description = "Prefix for Kubernetes resource names" + default = "workspace" +} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index dc02271..e7d797f 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -509,7 +509,7 @@ run "docker_false_by_default" { error_message = "docker must default to false when absent, and the workspace-pod module must receive docker_enabled=false (EARS-11)" } assert { - condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dind"]) == 0 + condition = length([for c in module.workspace.pod_containers : c if c.name == "dind"]) == 0 error_message = "dind container must not be rendered when docker=false (EARS-11)" } } @@ -538,11 +538,11 @@ run "docker_true_enables_sidecar" { error_message = "docker=true must propagate to workspace-pod.docker_enabled (EARS-12)" } assert { - condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dind"]) == 1 + condition = length([for c in module.workspace.pod_containers : c if c.name == "dind"]) == 1 error_message = "dind container must be rendered exactly once when docker=true (EARS-12)" } assert { - condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if 
c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 1 + condition = length([for c in module.workspace.pod_containers : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 1 error_message = "DOCKER_HOST env must be present on dev container when docker=true (EARS-12)" } } @@ -568,7 +568,7 @@ run "docker_false_sets_no_docker_host" { # EARS-11: when docker=false, DOCKER_HOST must NOT appear on the dev container assert { - condition = length([for c in kubernetes_pod_v1.workspace[0].spec[0].container : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 0 + condition = length([for c in module.workspace.pod_containers : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 0 error_message = "DOCKER_HOST env must not be set on dev container when docker=false (EARS-11)" } } From 9fab391dd9a8a69dff01b407aae3053d407e97dc Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:36:01 -0700 Subject: [PATCH 17/25] fix(terraform): redact sensitive env values from pod_containers output Co-Authored-By: Claude Sonnet 4.6 --- terraform/modules/workspace-pod/outputs.tf | 28 +++++++++++++++------- terraform/tests/task-metadata.tftest.hcl | 8 +++++++ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf index 4152a3d..209d1f2 100644 --- a/terraform/modules/workspace-pod/outputs.tf +++ b/terraform/modules/workspace-pod/outputs.tf @@ -11,16 +11,26 @@ output "pod_name" { # terraform/tests/*.tftest.hcl, not by the root module's production path. output "docker_enabled" { - value = var.docker_enabled + description = "Test-only. Echoes var.docker_enabled so root-level tests can assert on the value the module received." + value = var.docker_enabled } -# Test-introspection output. 
Exposes the rendered container list from the -# active resource (pod or deployment) so terraform tests can assert on -# container presence and env without addressing module internals directly. +# Test-introspection only. Consumed by terraform/tests/*.tftest.hcl. +# Deliberately redacted projection — ONLY container names and env VAR NAMES +# are exposed. Env values are omitted because they include +# CODER_AGENT_TOKEN, GITHUB_TOKEN, and other sensitive strings; re-exporting +# those via an output would print them in plaintext in CI logs and +# `terraform output` listings. output "pod_containers" { - value = var.deployment_type == "pod" ? ( - length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].spec[0].container : [] - ) : ( - length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].spec[0].template[0].spec[0].container : [] - ) + description = "Test-only. Redacted container list: name + env var names only. Not a production contract." + value = [ + for c in(var.deployment_type == "pod" ? ( + length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].spec[0].container : [] + ) : ( + length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].spec[0].template[0].spec[0].container : [] + )) : { + name = c.name + env = [for e in c.env : { name = e.name }] + } + ] } diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index e7d797f..8e484d9 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -482,6 +482,14 @@ run "size_invalid_fails_precondition" { expect_failures = [resource.coder_agent.dev] } +# COVERAGE NOTE: the three docker-gating runs below all exercise +# var.deployment_type == "pod" (the root template's default). The +# workspace-pod module also gates dind/DOCKER_HOST inside a mirrored +# kubernetes_deployment_v1 branch that is NOT exercised by these tests. 
+# If the template ever enables `deployment_type = "deployment"`, add a +# module-level tftest under terraform/modules/workspace-pod/tests/ that +# asserts the same invariants with deployment_type="deployment". + # ─── Docker sidecar gating ────────────────────────────────────────────────── run "docker_false_by_default" { From 6b221ebadc5d2b60989561f1607ba97fd6776554 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:38:53 -0700 Subject: [PATCH 18/25] test(terraform): add failing volume-mapping tests Co-Authored-By: Claude Sonnet 4.6 --- terraform/outputs.tf | 8 ++ terraform/tests/task-metadata.tftest.hcl | 145 +++++++++++++++++++++++ 2 files changed, 153 insertions(+) diff --git a/terraform/outputs.tf b/terraform/outputs.tf index 3b2dbf2..4a18ebe 100644 --- a/terraform/outputs.tf +++ b/terraform/outputs.tf @@ -28,3 +28,11 @@ output "dind_resources" { output "docker_enabled" { value = try(module.workspace.docker_enabled, null) } + +output "all_volumes" { + value = try(local.all_volumes, []) +} + +output "mapped_extra_volumes" { + value = try(local.mapped_extra_volumes, []) +} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 8e484d9..97f0839 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -581,3 +581,148 @@ run "docker_false_sets_no_docker_host" { } } +# ─── Volume mapping ────────────────────────────────────────────────────────── + +run "extra_volumes_mapped_to_module_shape" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000019" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = 
"{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"extra_volumes\":[{\"path\":\"/home/runner/cache\",\"size\":\"5Gi\"}]}" + } + } + + assert { + condition = length(output.mapped_extra_volumes) == 1 + error_message = "one extra_volume entry must produce one module volume (EARS-13)" + } + assert { + condition = output.mapped_extra_volumes[0].mount_path == "/home/runner/cache" + error_message = "mount_path must equal input path (EARS-13)" + } + assert { + condition = output.mapped_extra_volumes[0].persistent == true + error_message = "extra volumes must be persistent by default (per user clarification)" + } + assert { + condition = output.mapped_extra_volumes[0].containers == "dev" + error_message = "extra volumes must mount on dev container only" + } + assert { + condition = output.mapped_extra_volumes[0].size == "5Gi" + error_message = "extra volume size must pass through verbatim" + } + assert { + condition = output.mapped_extra_volumes[0].name == "home-runner-cache" + error_message = "PVC name must be input path with leading slash trimmed and remaining slashes replaced with dashes" + } +} + +run "extra_volumes_default_empty" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000020" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" + } + } + + # When extra_volumes is omitted, mapped_extra_volumes must be empty and + # all_volumes must be empty (docker is absent => false => no docker-cache). 
+ assert { + condition = length(output.mapped_extra_volumes) == 0 + error_message = "mapped_extra_volumes must be empty when extra_volumes is absent" + } + assert { + condition = length(output.all_volumes) == 0 + error_message = "all_volumes must be empty when docker=false and no extra_volumes" + } +} + +run "docker_cache_volume_present_when_docker_true" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000021" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true}" + } + } + + # Exactly one volume named "docker-cache" must be present, mounted on dind. + assert { + condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 1 + error_message = "all_volumes must include exactly one docker-cache volume when docker=true (EARS-12)" + } + assert { + condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].containers == "dind" + error_message = "docker-cache volume must mount on dind container (EARS-12)" + } + assert { + condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].persistent == false + error_message = "docker-cache volume must be ephemeral (not persistent)" + } + assert { + condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].mount_path == "/var/lib/docker" + error_message = "docker-cache mount_path must be /var/lib/docker" + } +} + +run "docker_cache_volume_absent_when_docker_false" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000022" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = 
"{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":false,\"extra_volumes\":[{\"path\":\"/cache\",\"size\":\"2Gi\"}]}" + } + } + + # docker=false: no docker-cache volume. But extra_volumes still maps. + assert { + condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 0 + error_message = "docker-cache volume must NOT be in all_volumes when docker=false (EARS-11)" + } + assert { + condition = length([for v in output.all_volumes : v if v.name == "cache"]) == 1 + error_message = "extra_volumes must still be mapped into all_volumes when docker=false" + } +} From 13b18df34296db7cf53f562044e4c58e136f8bc1 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:40:02 -0700 Subject: [PATCH 19/25] feat(terraform): map extra_volumes and gate docker-cache on local.docker Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 78b19e5..0a3b155 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -98,6 +98,31 @@ locals { requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "5Gi" } limits = { cpu = "2", memory = "4Gi", "ephemeral-storage" = "20Gi" } } + + # ── Extra volumes mapped to workspace-pod module shape ───────────────────── + mapped_extra_volumes = [ + for v in local.extra_volumes : { + name = replace(trim(v.path, "/"), "/", "-") + size = v.size + mount_path = v.path + persistent = true + count = 1 + containers = "dev" + } + ] + + # ── Docker cache volume (gated on local.docker) ──────────────────────────── + docker_cache_volume = local.docker ? 
[{ + name = "docker-cache" + size = "10Gi" + mount_path = "/var/lib/docker" + persistent = false + count = 1 + containers = "dind" + }] : [] + + # ── Composed volumes list passed to workspace-pod ────────────────────────── + all_volumes = concat(local.docker_cache_volume, local.mapped_extra_volumes) } # ─── Coder Agent ───────────────────────────────────────────────────────────── @@ -301,9 +326,7 @@ module "workspace" { dev_resources = local.dev_resources dind_resources = local.dind_resources - volumes = [ - { name = "docker-cache", size = "10Gi", mount_path = "/var/lib/docker", persistent = false, containers = "dind" }, - ] + volumes = local.all_volumes } # ─── AI Task ───────────────────────────────────────────────────────���───────── From 20211611c2db85a2df824886433424c08489b300 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:43:07 -0700 Subject: [PATCH 20/25] test(terraform): cover multi-entry and docker+extras combined volume mapping Co-Authored-By: Claude Sonnet 4.6 --- terraform/main.tf | 2 + terraform/tests/task-metadata.tftest.hcl | 72 ++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/terraform/main.tf b/terraform/main.tf index 0a3b155..b459844 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -100,6 +100,8 @@ locals { } # ── Extra volumes mapped to workspace-pod module shape ───────────────────── + # PVC name is derived as `replace(trim(path, "/"), "/", "-")` — e.g., + # "/home/runner/cache" → "home-runner-cache", "/cache" → "cache". 
mapped_extra_volumes = [ for v in local.extra_volumes : { name = replace(trim(v.path, "/"), "/", "-") diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 97f0839..0be7348 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -626,6 +626,44 @@ run "extra_volumes_mapped_to_module_shape" { condition = output.mapped_extra_volumes[0].name == "home-runner-cache" error_message = "PVC name must be input path with leading slash trimmed and remaining slashes replaced with dashes" } + assert { + condition = output.mapped_extra_volumes[0].count == 1 + error_message = "mapped extra volume must have count=1" + } +} + +run "multiple_extra_volumes_mapped" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000023" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"extra_volumes\":[{\"path\":\"/a\",\"size\":\"1Gi\"},{\"path\":\"/b/c\",\"size\":\"2Gi\"}]}" + } + } + + # Two entries should produce two independent module volumes. 
+ assert { + condition = length(output.mapped_extra_volumes) == 2 + error_message = "two extra_volumes entries must produce two module volumes (EARS-13)" + } + assert { + condition = length([for v in output.mapped_extra_volumes : v if v.name == "a" && v.mount_path == "/a" && v.size == "1Gi"]) == 1 + error_message = "single-segment path /a must map to name=a, mount_path=/a, size=1Gi" + } + assert { + condition = length([for v in output.mapped_extra_volumes : v if v.name == "b-c" && v.mount_path == "/b/c" && v.size == "2Gi"]) == 1 + error_message = "multi-segment path /b/c must map to name=b-c, mount_path=/b/c, size=2Gi" + } } run "extra_volumes_default_empty" { @@ -726,3 +764,37 @@ run "docker_cache_volume_absent_when_docker_false" { error_message = "extra_volumes must still be mapped into all_volumes when docker=false" } } + +run "docker_true_plus_extra_volumes" { + command = plan + + override_data { + target = data.coder_workspace.me + values = { + start_count = 1 + name = "t" + id = "00000000-0000-0000-0000-000000000024" + access_url = "https://example.test" + } + } + override_data { + target = data.coder_task.me + values = { + prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true,\"extra_volumes\":[{\"path\":\"/data\",\"size\":\"4Gi\"}]}" + } + } + + # docker=true should produce docker-cache + one mapped extra volume = 2 total. 
+ assert { + condition = length(output.all_volumes) == 2 + error_message = "docker=true + 1 extra_volume must yield 2 total volumes (EARS-12 + EARS-13)" + } + assert { + condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 1 + error_message = "docker-cache must be present when docker=true, even with extra_volumes" + } + assert { + condition = length([for v in output.all_volumes : v if v.name == "data"]) == 1 + error_message = "extra_volume must still be mapped when docker=true" + } +} From 365648f6bcbd799623018d8569257951a951f7db Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:44:30 -0700 Subject: [PATCH 21/25] test(terraform): assert docker-cache size==10Gi --- terraform/tests/task-metadata.tftest.hcl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl index 0be7348..baf4a65 100644 --- a/terraform/tests/task-metadata.tftest.hcl +++ b/terraform/tests/task-metadata.tftest.hcl @@ -733,6 +733,10 @@ run "docker_cache_volume_present_when_docker_true" { condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].mount_path == "/var/lib/docker" error_message = "docker-cache mount_path must be /var/lib/docker" } + assert { + condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].size == "10Gi" + error_message = "docker-cache size must be 10Gi (EARS-12)" + } } run "docker_cache_volume_absent_when_docker_false" { From 15202f9a8806277aa4380907dc4a26c767ecf1fa Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 22:24:45 -0700 Subject: [PATCH 22/25] Add initial TF config --- package-lock.json | 36 ++--- terraform/.terraform.lock.hcl | 45 +++++++ terraform/README.md | 124 ++++++++++++++++++ .../claude-code-agent/.terraform.lock.hcl | 25 ++++ terraform/modules/claude-code-agent/main.tf | 61 +++++++++ 
.../modules/claude-code-agent/outputs.tf | 4 + .../modules/claude-code-agent/variables.tf | 57 ++++++++ 7 files changed, 327 insertions(+), 25 deletions(-) create mode 100644 terraform/.terraform.lock.hcl create mode 100644 terraform/README.md create mode 100644 terraform/modules/claude-code-agent/.terraform.lock.hcl create mode 100644 terraform/modules/claude-code-agent/main.tf create mode 100644 terraform/modules/claude-code-agent/outputs.tf create mode 100644 terraform/modules/claude-code-agent/variables.tf diff --git a/package-lock.json b/package-lock.json index e80be5c..90f4749 100644 --- a/package-lock.json +++ b/package-lock.json @@ -333,7 +333,8 @@ "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20260418.1.tgz", "integrity": "sha512-bywXb2XmeSqrLCQYipcupLneqx015YhhNWz2v9b9iatpe8Cg551vP7ZuD5S2a6GfBka0dDnO70kIBiBvFglcrg==", "dev": true, - "license": "MIT OR Apache-2.0" + "license": "MIT OR Apache-2.0", + "peer": true }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", @@ -348,30 +349,6 @@ "node": ">=12" } }, - "node_modules/@emnapi/core": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz", - "integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@emnapi/wasi-threads": "1.2.1", - "tslib": "^2.4.0" - } - }, - "node_modules/@emnapi/runtime": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", - "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@emnapi/wasi-threads": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", @@ 
-1430,6 +1407,7 @@ "node_modules/@octokit/core": { "version": "7.0.6", "license": "MIT", + "peer": true, "dependencies": { "@octokit/auth-token": "^6.0.0", "@octokit/graphql": "^9.0.3", @@ -1989,6 +1967,7 @@ "integrity": "sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.19.0" } @@ -2057,6 +2036,7 @@ "integrity": "sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/utils": "4.1.4", "pathe": "^2.0.3" @@ -2071,6 +2051,7 @@ "integrity": "sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/pretty-format": "4.1.4", "@vitest/utils": "4.1.4", @@ -2674,6 +2655,7 @@ "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2944,6 +2926,7 @@ "integrity": "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "pathe": "^2.0.3" } @@ -2962,6 +2945,7 @@ "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", @@ -3040,6 +3024,7 @@ "integrity": "sha512-tFuJqTxKb8AvfyqMfnavXdzfy3h3sWZRWwfluGbkeR7n0HUev+FmNgZ8SDrRBTVrVCjgH5cA21qGbCffMNtWvg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "4.1.4", "@vitest/mocker": "4.1.4", @@ -3148,6 +3133,7 @@ "dev": true, "hasInstallScript": true, "license": "Apache-2.0", + "peer": true, "bin": { "workerd": "bin/workerd" }, diff --git 
a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..36078d1 --- /dev/null +++ b/terraform/.terraform.lock.hcl @@ -0,0 +1,45 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/coder/coder" { + version = "2.15.0" + constraints = ">= 2.12.0, >= 2.13.0" + hashes = [ + "h1:tYNavbEhcqzlIwpSe1GMrV/726+u703m2XGbinj3LPg=", + "zh:10897edfe4ecb975ce11b6b2dfb37317f07c725404d2a60b5fa4e114808259b9", + "zh:10b1af473883a9524353011943cfab89b401fc84ed38608a798e377aaa4ecebf", + "zh:4678c3b329e47a4c3fb9683db4850470e8ef6ede570f6a2bb99701f1125b4215", + "zh:4c2df7c4d8f0fc8546536c886c0984e7173dcc2d3759218fdae3d4bf2703af14", + "zh:72e0b7297f3e20abe2a81e34fe4976caa79691857b6355a2b9492f3ddc85aa9e", + "zh:773077f4eaaf6a31154f1d8aa63b4ef3bbe34104271c4d9cf065261cba8814a9", + "zh:80b1eb2aa2d18ce2ff26e02fa179994fd137031c9c4e2cce0d547b126eadf62e", + "zh:8efdf98494ec442630efb48aabc8dbf10b03254f3f2a2247f519dbf005c5aabc", + "zh:a65d987f531bf0a41cc5d68fd46f675cb37e8570a8a42579bc30e22312b3df4d", + "zh:bb2c57695e801994604542791ff87ed4b7e0d94ffa9d4c6a0ec34260f4616a49", + "zh:be9a5086d498b941e08e9c30b4de5151b15dfab526083387dd47e9451d7bde53", + "zh:de8fe0131db31511c8d4e02b1b58aa2b2bc82ca50188f2ed1d9d731d70321fb2", + "zh:e1d95002571d9025631f9dc98f441e22cd68783a27e9e35925bda21dbd94f904", + "zh:eb0de36ba625d187dce45a24ad9e724bafff821fb466d014cc7d9a02d2d72309", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.35.1" + constraints = "~> 2.35.0" + hashes = [ + "h1:zgXeWvp4//Ry+4glwNrLMpPFOU8QBQlARNmR9WCNe9o=", + "zh:12212ca5ae47823ce14bfafb909eeb6861faf1e2435fb2fc4a8b334b3544b5f5", + "zh:3f49b3d77182df06b225ab266667de69681c2e75d296867eb2cf06a8f8db768c", + "zh:40832494d19f8a2b3cd0c18b80294d0b23ef6b82f6f6897b5fe00248a9997460", + 
"zh:739a5ddea61a77925ee7006a29c8717377a2e9d0a79a0bbd98738d92eec12c0d", + "zh:a02b472021753627c5c39447a56d125a32214c29ff9108fc499f2dcdf4f1cc4f", + "zh:b78865b3867065aa266d6758c9601a2756741478f5735a838c20d633d65e085b", + "zh:d362e87464683f5632790e66920ea803adb54c2bc0cb24b6fd9a314d2b1efffd", + "zh:d98206fe88c2c9a52b8d2d0cb2c877c812a4a51d19f9d8428e63cbd5fd8a304d", + "zh:dfa320946b1ce3f3615c42b3447a28dc9f604c06d8b9a6fe289855ab2ade4d11", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fc1debd2e695b5222d2ccc8b24dab65baba4ee2418ecce944e64d42e79474cb5", + "zh:fdaf960443720a238c09e519aeb30faf74f027ac5d1e0a309c3b326888e031d7", + ] +} diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000..82f66e2 --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,124 @@ +--- +display_name: Task +description: An ephemeral template for autonomous AI coding tasks +icon: ../../../site/static/emojis/1f916.png +maintainer_github: neekolas +verified: true +tags: [] +--- + +# Coder Tasks + +Coder Tasks let you assign a GitHub issue to an AI agent that autonomously resolves it — reading the issue, writing code, running tests, and opening a PR. The agent runs in an ephemeral Coder workspace that is created when the task starts and destroyed when the issue closes. + +## How It Works + +``` +Assign issue to agent user + │ + ▼ +GitHub App creates a Coder workspace + │ + ▼ +Agent reads the issue, explores the repo, writes a spec + │ + ▼ +Agent implements the fix/feature and opens a PR + │ + ▼ +Humans review the PR; comments are forwarded to the agent + │ + ▼ +CI failures are automatically sent to the agent to fix + │ + ▼ +Issue closes → workspace is deleted +``` + +The system has two parts: + +- **This template** provisions an ephemeral Kubernetes pod with Claude Code running in fully autonomous mode. It parses a GitHub issue URL from the task prompt, clones the repo, and instructs the agent to resolve the issue. 
+- **[coder-action](https://github.com/xmtplabs/coder-action)** is a GitHub App that handles the lifecycle — creating workspaces when issues are assigned, forwarding PR comments and CI failures to the agent, and cleaning up when issues close. + +## Adding Tasks to Your Repo + +### Prerequisites + +- A running Coder deployment with this template installed (e.g. [sandbox.xmtp.team](https://sandbox.xmtp.team)) +- A GitHub user account for the agent (e.g. `xmtp-coder-agent`). Already exists. + +### Setup + +Install the [coder-action](https://github.com/xmtplabs/coder-action) GitHub App on your organization (all repos or select repos). The app receives webhooks directly from GitHub — no workflow files are needed in your repository. + +The app handles five event types: + +| Trigger | What happens | +|---------|-------------| +| Issue assigned to agent user | Creates a Coder workspace and starts the task | +| Issue closed | Stops and deletes the workspace | +| Comment on the issue | Forwarded to the running agent | +| Comment on the agent's PR | Forwarded to the running agent | +| CI check fails on agent's PR | Failed job logs are sent to the agent | + +See the [coder-action README](https://github.com/xmtplabs/coder-action/blob/main/README.md) for GitHub App registration, deployment, and configuration details. This is already done for `xmtp` and `xmtplabs` repos. + +### Repository Requirements + +Your repo should have a `.devcontainer/devcontainer.json` so the workspace can build a development environment with the right toolchain. A minimal example: + +```jsonc +// .devcontainer/devcontainer.json +{ + "image": "mcr.microsoft.com/devcontainers/base:ubuntu", + "features": { + "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} + } +} +``` + +## Using Tasks + +### Assigning an Issue + +To start a task, assign a GitHub issue to your agent user (e.g. `xmtp-coder-agent`). The app validates that the assigner has write access to the repo, then creates a workspace. 
+ +A comment is posted on the issue with a link to the running task in the Coder dashboard. + +### Interacting with the Agent + +- **Issue comments** — post a comment on the issue to give the agent new instructions or context. The comment is forwarded to the agent's active session. +- **PR review comments** — review the agent's PR as you would any other. Comments are forwarded to the agent, which will attempt to address them. +- **CI failures** — if a monitored workflow fails on the agent's PR, the failed job logs are automatically sent to the agent so it can self-correct. + +### Monitoring Progress + +Open the task link from the issue comment to view the agent's terminal session in the Coder dashboard. You can watch it work in real time. + +### Stopping a Task + +Close the GitHub issue. The app deletes the workspace and frees all resources. + +## What the Agent Does + +Inside the workspace, the agent follows the [coder-task](https://github.com/xmtplabs/code-factory/blob/main/skills/coder-task/SKILL.md) workflow: + +1. Reads the GitHub issue +2. Forks the repo and creates a working branch +3. Explores the codebase to understand relevant code and tests +4. Writes a spec (posted as an issue comment) if the issue doesn't already contain one +5. Decomposes the spec into implementation tasks +6. Implements the changes with tests +7. Opens a PR that references the issue + +## Template Details + +The template provisions: + +- An ephemeral Kubernetes pod (destroyed when the task ends) +- A devcontainer built from the repo's `.devcontainer/devcontainer.json` via [envbuilder](https://github.com/coder/envbuilder) +- Docker-in-Docker sidecar for container builds +- Claude Code in fully autonomous mode with LSP support (Go, Rust, TypeScript) +- 30 GB workspace disk + 1 GB persistent agent state + +Resources: 2 CPU / 8 GB memory guaranteed, burst to 8 CPU / 24 GB. 
diff --git a/terraform/modules/claude-code-agent/.terraform.lock.hcl b/terraform/modules/claude-code-agent/.terraform.lock.hcl new file mode 100644 index 0000000..da7746b --- /dev/null +++ b/terraform/modules/claude-code-agent/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/coder/coder" { + version = "2.15.0" + constraints = ">= 2.12.0, >= 2.13.0" + hashes = [ + "h1:tYNavbEhcqzlIwpSe1GMrV/726+u703m2XGbinj3LPg=", + "zh:10897edfe4ecb975ce11b6b2dfb37317f07c725404d2a60b5fa4e114808259b9", + "zh:10b1af473883a9524353011943cfab89b401fc84ed38608a798e377aaa4ecebf", + "zh:4678c3b329e47a4c3fb9683db4850470e8ef6ede570f6a2bb99701f1125b4215", + "zh:4c2df7c4d8f0fc8546536c886c0984e7173dcc2d3759218fdae3d4bf2703af14", + "zh:72e0b7297f3e20abe2a81e34fe4976caa79691857b6355a2b9492f3ddc85aa9e", + "zh:773077f4eaaf6a31154f1d8aa63b4ef3bbe34104271c4d9cf065261cba8814a9", + "zh:80b1eb2aa2d18ce2ff26e02fa179994fd137031c9c4e2cce0d547b126eadf62e", + "zh:8efdf98494ec442630efb48aabc8dbf10b03254f3f2a2247f519dbf005c5aabc", + "zh:a65d987f531bf0a41cc5d68fd46f675cb37e8570a8a42579bc30e22312b3df4d", + "zh:bb2c57695e801994604542791ff87ed4b7e0d94ffa9d4c6a0ec34260f4616a49", + "zh:be9a5086d498b941e08e9c30b4de5151b15dfab526083387dd47e9451d7bde53", + "zh:de8fe0131db31511c8d4e02b1b58aa2b2bc82ca50188f2ed1d9d731d70321fb2", + "zh:e1d95002571d9025631f9dc98f441e22cd68783a27e9e35925bda21dbd94f904", + "zh:eb0de36ba625d187dce45a24ad9e724bafff821fb466d014cc7d9a02d2d72309", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/terraform/modules/claude-code-agent/main.tf b/terraform/modules/claude-code-agent/main.tf new file mode 100644 index 0000000..a379d45 --- /dev/null +++ b/terraform/modules/claude-code-agent/main.tf @@ -0,0 +1,61 @@ +terraform { + required_providers { + coder = { source = "coder/coder" } + } +} + +# ─── Locals 
────────────────────────────────────────────────────────────────── + +locals { + enabled_plugins = { for p in var.plugins : p => true } + + marketplace_map = { for m in var.marketplaces : split("/", m)[1] => { + source = { source = "github", repo = m } + } } + + settings = jsonencode({ + permissions = { + defaultMode = "bypassPermissions" + } + enableRemoteControl = true + skipDangerousModePermissionPrompt = true + extraKnownMarketplaces = local.marketplace_map + enabledPlugins = local.enabled_plugins + }) + + plugin_install_commands = join("\n", [for p in var.plugins : "claude plugin install ${p} --scope user"]) + + post_install_script = join("\n", compact([ + var.post_install_script, + local.plugin_install_commands, + ])) +} + +# ─── Claude Code Module ────────────────────────────────────────────────────── + +module "claude-code" { + count = var.start_count + source = "registry.coder.com/coder/claude-code/coder" + version = "4.9.1" + agent_id = var.agent_id + workdir = var.work_dir + model = "opus" + claude_code_oauth_token = var.oauth_token + + pre_install_script = <<-EOT + # Symlink persistent agent state into $HOME so it survives workspace restarts + mkdir -p /persist/agent-state/claude /persist/agent-state/claude-module + ln -sfn /persist/agent-state/claude "$HOME/.claude" + ln -sfn /persist/agent-state/claude-module "$HOME/.claude-module" + + # Configure Claude Code settings + mkdir -p ~/.claude + cat > ~/.claude/settings.json <<'SETTINGS' + ${local.settings} + SETTINGS + EOT + + post_install_script = local.post_install_script + ai_prompt = var.ai_prompt + mcp = var.mcp != "" ? var.mcp : null +} diff --git a/terraform/modules/claude-code-agent/outputs.tf b/terraform/modules/claude-code-agent/outputs.tf new file mode 100644 index 0000000..7f5b4e5 --- /dev/null +++ b/terraform/modules/claude-code-agent/outputs.tf @@ -0,0 +1,4 @@ +output "task_app_id" { + description = "Claude Code task app ID (for coder_ai_task)" + value = length(module.claude-code) > 0 ? 
module.claude-code[0].task_app_id : "" +} diff --git a/terraform/modules/claude-code-agent/variables.tf b/terraform/modules/claude-code-agent/variables.tf new file mode 100644 index 0000000..ce52892 --- /dev/null +++ b/terraform/modules/claude-code-agent/variables.tf @@ -0,0 +1,57 @@ +variable "start_count" { + type = number + description = "data.coder_workspace.me.start_count" +} + +variable "agent_id" { + type = string + description = "coder_agent resource ID" +} + +variable "work_dir" { + type = string + description = "Working directory for Claude Code" +} + +variable "oauth_token" { + type = string + sensitive = true + description = "Claude Code OAuth token" +} + +variable "ai_prompt" { + type = string + description = "Optional AI prompt. When set, enables autonomous mode (skipDangerousModePermissionPrompt)" + default = "" +} + +variable "marketplaces" { + type = list(string) + description = "Extra known marketplaces added to settings.json" + default = ["xmtplabs/code-factory"] +} + +variable "plugins" { + type = list(string) + description = "Plugins to install via 'claude plugin install --scope user' and enable in settings" + default = [ + "code-factory@code-factory", + "ralph-loop@claude-plugins-official", + "code-simplifier@claude-plugins-official", + "rust-analyzer-lsp@claude-plugins-official", + "gopls-lsp@claude-plugins-official", + "typescript-lsp@claude-plugins-official", + ] +} + +variable "mcp" { + type = string + description = "MCP server configuration JSON string passed to the Claude Code module" + default = "" +} + +variable "post_install_script" { + type = string + description = "Script to run after Claude Code and plugin installation" + default = "" +} From 8502827fca4e6d328241d54c6d528da9b1a982b6 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 23:24:12 -0700 Subject: [PATCH 23/25] Update template inputs --- .devcontainer/devcontainer.json | 14 - 
.github/workflows/deploy-template.yaml | 41 - .github/workflows/terraform-lint.yaml | 45 - .gitignore | 1 - .zed/settings.json | 12 - AGENTS.md | 17 - README.md | 15 - src/config/app-config.ts | 4 + src/config/repo-config-schema.test.ts | 57 +- src/config/repo-config-schema.ts | 40 +- src/durable-objects/repo-config-do.test.ts | 4 +- src/events/types.ts | 2 +- src/services/coder/service.ts | 6 +- src/services/task-runner.ts | 2 + src/webhooks/github/router.test.ts | 1 + src/webhooks/github/router.ts | 1 + src/workflows/instance-id.test.ts | 6 +- src/workflows/steps/create-task.test.ts | 107 ++- src/workflows/steps/create-task.ts | 40 +- src/workflows/steps/template-inputs.ts | 74 ++ src/workflows/task-runner-workflow.test.ts | 24 +- src/workflows/task-runner-workflow.ts | 9 +- terraform/.terraform.lock.hcl | 45 - terraform/.tflint.hcl | 4 - terraform/README.md | 124 --- terraform/main.tf | 355 -------- .../claude-code-agent/.terraform.lock.hcl | 25 - terraform/modules/claude-code-agent/main.tf | 61 -- .../modules/claude-code-agent/outputs.tf | 4 - .../modules/claude-code-agent/variables.tf | 57 -- terraform/modules/workspace-pod/main.tf | 521 ------------ terraform/modules/workspace-pod/outputs.tf | 36 - terraform/modules/workspace-pod/variables.tf | 162 ---- terraform/outputs.tf | 38 - terraform/tests/task-metadata.tftest.hcl | 804 ------------------ 35 files changed, 346 insertions(+), 2412 deletions(-) delete mode 100644 .github/workflows/deploy-template.yaml delete mode 100644 .github/workflows/terraform-lint.yaml create mode 100644 src/workflows/steps/template-inputs.ts delete mode 100644 terraform/.terraform.lock.hcl delete mode 100644 terraform/.tflint.hcl delete mode 100644 terraform/README.md delete mode 100644 terraform/main.tf delete mode 100644 terraform/modules/claude-code-agent/.terraform.lock.hcl delete mode 100644 terraform/modules/claude-code-agent/main.tf delete mode 100644 terraform/modules/claude-code-agent/outputs.tf delete mode 100644 
terraform/modules/claude-code-agent/variables.tf delete mode 100644 terraform/modules/workspace-pod/main.tf delete mode 100644 terraform/modules/workspace-pod/outputs.tf delete mode 100644 terraform/modules/workspace-pod/variables.tf delete mode 100644 terraform/outputs.tf delete mode 100644 terraform/tests/task-metadata.tftest.hcl diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 6fce328..0b04152 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -3,17 +3,12 @@ "image": "mcr.microsoft.com/devcontainers/typescript-node:24", "features": { "ghcr.io/devcontainers/features/github-cli:1": {}, - "ghcr.io/devcontainers/features/terraform:1": { - "version": "1.9.8", - "tflint": "0.53.0" - } }, "postCreateCommand": "npm install", "customizations": { "vscode": { "extensions": [ "biomejs.biome", - "hashicorp.terraform" ], "settings": { "editor.defaultFormatter": "biomejs.biome", @@ -22,15 +17,6 @@ "[typescript]": { "editor.defaultFormatter": "biomejs.biome" }, - "[terraform]": { - "editor.defaultFormatter": "hashicorp.terraform", - "editor.formatOnSave": true, - "editor.tabSize": 2 - }, - "[terraform-vars]": { - "editor.defaultFormatter": "hashicorp.terraform", - "editor.formatOnSave": true - } } } } diff --git a/.github/workflows/deploy-template.yaml b/.github/workflows/deploy-template.yaml deleted file mode 100644 index 9bb015d..0000000 --- a/.github/workflows/deploy-template.yaml +++ /dev/null @@ -1,41 +0,0 @@ -name: Deploy Coder Template - -on: - push: - branches: [main] - paths: - - "terraform/**" - - ".github/workflows/deploy-template.yaml" - workflow_dispatch: - -permissions: - contents: read - -concurrency: - group: deploy-template - cancel-in-progress: false - -jobs: - deploy: - runs-on: ubuntu-latest - environment: coder - env: - CODER_URL: ${{ secrets.CODER_URL }} - CODER_SESSION_TOKEN: ${{ secrets.CODER_SESSION_TOKEN }} - TEMPLATE_NAME: task-beta - steps: - - uses: actions/checkout@v4 - - - 
name: Install Coder CLI - run: curl -fsSL https://coder.com/install.sh | sh -s -- --version 2.18.2 - - - name: Verify Coder auth - run: coder users show me - - - name: Push template - run: | - coder templates push "$TEMPLATE_NAME" \ - --directory ./terraform \ - --yes \ - --name "run-${GITHUB_RUN_ID}" \ - --message "Deploy from ${GITHUB_SHA::7}" diff --git a/.github/workflows/terraform-lint.yaml b/.github/workflows/terraform-lint.yaml deleted file mode 100644 index a16df22..0000000 --- a/.github/workflows/terraform-lint.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: Terraform Lint - -on: - pull_request: - paths: - - "terraform/**" - - ".github/workflows/terraform-lint.yaml" - -permissions: - contents: read - -jobs: - lint: - runs-on: ubuntu-latest - defaults: - run: - working-directory: terraform - steps: - - uses: actions/checkout@v4 - - - uses: hashicorp/setup-terraform@v3 - with: - terraform_version: "1.9.8" - terraform_wrapper: false - - - name: terraform fmt - run: terraform fmt -check -recursive -diff - - - name: terraform init - run: terraform init -backend=false - - - name: terraform test - run: terraform test - - - uses: terraform-linters/setup-tflint@v4 - with: - tflint_version: v0.53.0 - - - name: tflint --init - run: tflint --init - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: tflint - run: tflint --recursive --format compact diff --git a/.gitignore b/.gitignore index d0c5767..66332b3 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,3 @@ docs/plans/ !.dev.vars.example .wrangler/ .claude/ -.terraform/ diff --git a/.zed/settings.json b/.zed/settings.json index 0a50593..9eb83d1 100644 --- a/.zed/settings.json +++ b/.zed/settings.json @@ -27,17 +27,5 @@ "JSONC": { "formatter": { "language_server": { "name": "biome" } } }, - "Terraform": { - "format_on_save": "on", - "formatter": "language_server", - "language_servers": ["terraform-ls"], - "tab_size": 2 - }, - "Terraform Vars": { - "format_on_save": "on", - "formatter": "language_server", - 
"language_servers": ["terraform-ls"], - "tab_size": 2 - } } } diff --git a/AGENTS.md b/AGENTS.md index 5223b56..04ff8a8 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -103,23 +103,6 @@ Cross-cutting: - **[docs/gotchas.md](docs/gotchas.md)** — collected foot-guns with context. Read before non-trivial changes. - **[docs/testing.md](docs/testing.md)** — test layers, `introspectWorkflow` patterns, fetch-mocking options. -## Terraform / Coder template - -The Coder template lives in [`terraform/`](terraform/) (template name -`task-beta`). Two workflows guard it: - -- [`.github/workflows/terraform-lint.yaml`](.github/workflows/terraform-lint.yaml) - — PR gate: `terraform fmt -check` + `tflint --recursive` (config in - [`terraform/.tflint.hcl`](terraform/.tflint.hcl)). -- [`.github/workflows/deploy-template.yaml`](.github/workflows/deploy-template.yaml) - — `main` push + manual dispatch: runs - `coder templates push task-beta --directory ./terraform`, authenticated with - the `CODER_URL` + `CODER_SESSION_TOKEN` repo secrets. - -Local: the devcontainer installs `terraform` + `tflint` at the same versions -CI pins (1.9.8 / 0.53.0). `.terraform/` is gitignored; -`terraform/.terraform.lock.hcl` is committed. - ## How to extend - **[docs/adding-an-event-type.md](docs/adding-an-event-type.md)** — checklist for wiring a new GitHub event into the router + a new step factory + tests. diff --git a/README.md b/README.md index c86dc4c..b7becc9 100644 --- a/README.md +++ b/README.md @@ -37,21 +37,6 @@ Two GitHub identities work together: See [docs/github-app-setup.md](docs/github-app-setup.md) for step-by-step instructions: creating the GitHub App, configuring webhook delivery, and installing it on your repositories. -## Coder Template - -This repo also ships the Coder template that provisions the ephemeral workspace -a task runs in. The template lives in [`terraform/`](terraform/README.md) and is -named **`task-beta`** on the Coder deployment. 
- -- [`terraform-lint.yaml`](.github/workflows/terraform-lint.yaml) gates PRs that - touch `terraform/**` on `terraform fmt` and `tflint`. -- [`deploy-template.yaml`](.github/workflows/deploy-template.yaml) pushes the - template to the Coder deployment (via `coder templates push task-beta`) on - every merge to `main` that touches `terraform/**`, and on manual dispatch. - -See [`terraform/README.md`](terraform/README.md) for what the template -provisions. - ## Configuration All non-secret config lives in [`wrangler.toml`](wrangler.toml) under `[vars]`. Secrets are provisioned via `wrangler secret put` in production and `.dev.vars` locally (see [`.dev.vars.example`](.dev.vars.example)). diff --git a/src/config/app-config.ts b/src/config/app-config.ts index cf6287b..c03a00a 100644 --- a/src/config/app-config.ts +++ b/src/config/app-config.ts @@ -12,6 +12,10 @@ const AppConfigSchema = z.object({ coderTaskNamePrefix: z.string().min(1).default("gh"), coderTemplateName: z.string().min(1).default("task-template"), coderTemplateNameCodex: z.string().min(1).default("task-template-codex"), + // Hardcoded template name used when a repo-level config is present. Not + // env-overridable: the field is intentionally omitted from `loadConfig`'s + // env→raw mapping so it always falls through to this default. 
+ codeFactoryTemplate: z.string().min(1).default("code-factory"), coderTemplatePreset: z.string().min(1).optional(), coderOrganization: z.string().min(1).default("default"), logFormat: z.string().optional(), diff --git a/src/config/repo-config-schema.test.ts b/src/config/repo-config-schema.test.ts index 38bdbb5..932b200 100644 --- a/src/config/repo-config-schema.test.ts +++ b/src/config/repo-config-schema.test.ts @@ -22,7 +22,7 @@ path = "/data" size = "20gb" [harness] -provider = "claude" +provider = "claude_code" [[scheduled_jobs]] name = "nightly" @@ -35,9 +35,9 @@ prompt = "Do the thing" expect(parsed.sandbox?.docker).toBe(true); expect(parsed.sandbox?.volumes?.[0]).toEqual({ path: "/data", - size: "20gb", + size: "20Gi", }); - expect(parsed.harness?.provider).toBe("claude"); + expect(parsed.harness?.provider).toBe("claude_code"); expect(parsed.scheduled_jobs?.[0]?.name).toBe("nightly"); }); test("unknown keys are dropped (write-side loose-parse)", () => { @@ -94,21 +94,21 @@ describe("resolveRepoConfigSettings — defaults applied on read", () => { expect(r.sandbox.size).toBe("medium"); expect(r.sandbox.docker).toBe(false); expect(r.sandbox.volumes).toEqual([]); - expect(r.harness.provider).toBe("claude"); + expect(r.harness.provider).toBe("claude_code"); expect(r.scheduled_jobs).toEqual([]); }); test("empty object → full defaults", () => { expect(resolveRepoConfigSettings({})).toEqual({ sandbox: { size: "medium", docker: false, volumes: [] }, - harness: { provider: "claude" }, + harness: { provider: "claude_code" }, scheduled_jobs: [], }); }); - test("volume with path-only → size defaulted to '10gb'", () => { + test("volume with path-only → size defaulted to '10Gi'", () => { const r = resolveRepoConfigSettings({ sandbox: { volumes: [{ path: "/data" }] }, }); - expect(r.sandbox.volumes[0]).toEqual({ path: "/data", size: "10gb" }); + expect(r.sandbox.volumes[0]).toEqual({ path: "/data", size: "10Gi" }); }); test("partial override: explicit size beats default", () 
=> { const r = resolveRepoConfigSettings({ @@ -118,3 +118,46 @@ describe("resolveRepoConfigSettings — defaults applied on read", () => { expect(r.sandbox.docker).toBe(false); }); }); + +describe("volume size normalization → canonical Kubernetes binary-SI form", () => { + test.each([ + ["10gb", "10Gi"], + ["10GB", "10Gi"], + ["10Gb", "10Gi"], + ["10G", "10Gi"], + ["10g", "10Gi"], + ["10gi", "10Gi"], + ["10Gi", "10Gi"], + ["500mb", "500Mi"], + ["500M", "500Mi"], + ["500Mi", "500Mi"], + ["2tb", "2Ti"], + ["64k", "64Ki"], + [" 20 GB ", "20Gi"], + ])("parseRepoConfigToml normalizes %s → %s on write", (input, expected) => { + const parsed = parseRepoConfigToml( + `[[sandbox.volumes]]\npath = "/data"\nsize = "${input}"`, + ); + expect(parsed.sandbox?.volumes?.[0]?.size).toBe(expected); + }); + + test("resolveRepoConfigSettings normalizes legacy stored values on read", () => { + // Simulate a stored record written before the normalization transform + // existed — the resolved schema must re-normalize on read. + const r = resolveRepoConfigSettings({ + sandbox: { volumes: [{ path: "/data", size: "20gb" }] }, + }); + expect(r.sandbox.volumes[0]).toEqual({ path: "/data", size: "20Gi" }); + }); + + test.each(["10", "gb", "10bb", "10.5gb", "10eb", "abc"])( + "invalid volume size %s → parse rejects", + (input) => { + expect(() => + parseRepoConfigToml( + `[[sandbox.volumes]]\npath = "/data"\nsize = "${input}"`, + ), + ).toThrow(/Invalid RepoConfig/); + }, + ); +}); diff --git a/src/config/repo-config-schema.ts b/src/config/repo-config-schema.ts index 2df680f..9a86ea9 100644 --- a/src/config/repo-config-schema.ts +++ b/src/config/repo-config-schema.ts @@ -8,7 +8,35 @@ import { z } from "zod"; export const SandboxSizeSchema = z.enum(["small", "medium", "large"]); /** Allowed values for `harness.provider`. 
*/ -export const HarnessProviderSchema = z.enum(["claude", "codex"]); +export const HarnessProviderSchema = z.enum(["claude_code", "codex"]); + +// ── Volume size normalization ──────────────────────────────────────────────── +// Kubernetes PVCs require binary-SI suffixes like `10Gi`. Users routinely +// write `10gb` / `10GB` / `10G` / `10gi`; we accept those shapes and normalize +// everything to `i` before the value leaves the write path. + +const VOLUME_SIZE_REGEX = /^\s*(\d+)\s*(k|kb|ki|m|mb|mi|g|gb|gi|t|tb|ti)\s*$/i; + +function normalizeVolumeSize(input: string): string { + const match = VOLUME_SIZE_REGEX.exec(input); + if (!match) return input; // unreachable: regex validated before transform + const digits = match[1] ?? ""; + const prefix = (match[2] ?? "").charAt(0).toUpperCase(); + return `${digits}${prefix}i`; +} + +/** + * A Kubernetes-compatible volume size. Accepts common variants (`10gb`, + * `10GB`, `10G`, `10gi`, `10Gi`, etc.) and always emits the canonical + * binary-SI form (`10Gi`). Supports `K/M/G/T` prefixes. + */ +export const VolumeSizeSchema = z + .string() + .regex( + VOLUME_SIZE_REGEX, + 'expected a size like "10Gi" (K/M/G/T with optional b/i suffix)', + ) + .transform(normalizeVolumeSize); // ── Sparse (stored) schemas ────────────────────────────────────────────────── // Sparse schemas mirror what users actually wrote in TOML. No `.default()`: @@ -18,7 +46,7 @@ export const HarnessProviderSchema = z.enum(["claude", "codex"]); /** Sparse shape for a single sandbox volume entry. `path` is required. */ export const StoredSandboxVolumeSchema = z.object({ path: z.string(), - size: z.string().optional(), + size: VolumeSizeSchema.optional(), }); /** Sparse shape for the `[sandbox]` section. */ @@ -55,10 +83,10 @@ export const StoredRepoConfigSettingsSchema = z.object({ // Resolved schemas apply defaults on read so every consumer sees a fully // populated object without worrying about whether a field was written. 
-/** Resolved volume: `size` defaults to `"10gb"` when absent. */ +/** Resolved volume: `size` defaults to `"10Gi"` when absent. */ export const ResolvedSandboxVolumeSchema = z.object({ path: z.string(), - size: z.string().default("10gb"), + size: VolumeSizeSchema.default("10Gi"), }); /** Resolved sandbox: size/docker/volumes all have defaults. */ @@ -68,9 +96,9 @@ export const ResolvedSandboxSchema = z.object({ volumes: z.array(ResolvedSandboxVolumeSchema).default([]), }); -/** Resolved harness: provider defaults to `"claude"`. */ +/** Resolved harness: provider defaults to `"claude_code"`. */ export const ResolvedHarnessSchema = z.object({ - provider: HarnessProviderSchema.default("claude"), + provider: HarnessProviderSchema.default("claude_code"), }); /** diff --git a/src/durable-objects/repo-config-do.test.ts b/src/durable-objects/repo-config-do.test.ts index 8ed96c0..94cfc14 100644 --- a/src/durable-objects/repo-config-do.test.ts +++ b/src/durable-objects/repo-config-do.test.ts @@ -48,7 +48,7 @@ describe("RepoConfigDO — get/set round-trip", () => { expect(read?.settings.sandbox.size).toBe("medium"); expect(read?.settings.sandbox.docker).toBe(false); expect(read?.settings.sandbox.volumes).toEqual([]); - expect(read?.settings.harness.provider).toBe("claude"); + expect(read?.settings.harness.provider).toBe("claude_code"); expect(read?.settings.scheduled_jobs).toEqual([]); }); @@ -86,7 +86,7 @@ describe("RepoConfigDO — get/set round-trip", () => { }); const read = await stub.getRepoConfig(); expect(read?.settings.sandbox.volumes).toEqual([ - { path: "/data", size: "10gb" }, + { path: "/data", size: "10Gi" }, ]); }); diff --git a/src/events/types.ts b/src/events/types.ts index 643033d..b04a54b 100644 --- a/src/events/types.ts +++ b/src/events/types.ts @@ -14,7 +14,7 @@ export type TaskRequestedEvent = { type: "task_requested"; source: EventSource; repository: { owner: string; name: string }; - issue: { number: number; url: string }; + issue: { id: number; number: 
number; url: string }; requester: { login: string; externalId: number }; }; diff --git a/src/services/coder/service.ts b/src/services/coder/service.ts index 924c26e..ddb1b89 100644 --- a/src/services/coder/service.ts +++ b/src/services/coder/service.ts @@ -358,8 +358,10 @@ export class CoderService implements TaskRunner { taskName: TaskName; owner: string; input: string; + templateName?: string; }): Promise { - const { taskName, owner, input } = params; + const { taskName, owner, input, templateName } = params; + const resolvedTemplateName = templateName ?? this.config.templateName; // 1. Check for an existing task const existing = await this.findTask(taskName, owner); @@ -373,7 +375,7 @@ export class CoderService implements TaskRunner { } // 2. Resolve template - const templateEndpoint = `/api/v2/organizations/${encodeURIComponent(this.config.organization)}/templates/${encodeURIComponent(this.config.templateName)}`; + const templateEndpoint = `/api/v2/organizations/${encodeURIComponent(this.config.organization)}/templates/${encodeURIComponent(resolvedTemplateName)}`; const rawTemplate = await this.request(templateEndpoint); const template = CoderSDKTemplateSchema.parse(rawTemplate); const templateVersionId = template.active_version_id; diff --git a/src/services/task-runner.ts b/src/services/task-runner.ts index dc4be40..325e74d 100644 --- a/src/services/task-runner.ts +++ b/src/services/task-runner.ts @@ -47,11 +47,13 @@ export interface TaskRunner { /** * Create a task. Returns the existing one if `(taskName, owner)` collides. + * `templateName` overrides the runner-configured default when provided. 
*/ create(params: { taskName: TaskName; owner: string; input: string; + templateName?: string; }): Promise; /** diff --git a/src/webhooks/github/router.test.ts b/src/webhooks/github/router.test.ts index 9ae3bce..e2c90ce 100644 --- a/src/webhooks/github/router.test.ts +++ b/src/webhooks/github/router.test.ts @@ -78,6 +78,7 @@ describe("WebhookRouter", () => { expect(event.repository.owner).toBe("xmtplabs"); expect(event.repository.name).toBe("coder-action"); expect(event.issue.number).toBe(65); + expect(event.issue.id).toBe(4132709157); expect(event.issue.url).toBe( "https://github.com/xmtplabs/coder-action/issues/65", ); diff --git a/src/webhooks/github/router.ts b/src/webhooks/github/router.ts index eb377e5..a049659 100644 --- a/src/webhooks/github/router.ts +++ b/src/webhooks/github/router.ts @@ -164,6 +164,7 @@ export class WebhookRouter { name: payload.repository.name, }, issue: { + id: payload.issue.id, number: payload.issue.number, url: payload.issue.html_url, }, diff --git a/src/workflows/instance-id.test.ts b/src/workflows/instance-id.test.ts index 302187f..7c37946 100644 --- a/src/workflows/instance-id.test.ts +++ b/src/workflows/instance-id.test.ts @@ -8,7 +8,7 @@ describe("buildInstanceId", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, - issue: { number: 42, url: "u" }, + issue: { id: 1, number: 42, url: "u" }, requester: { login: "u", externalId: 1 }, }; const id = buildInstanceId(event, "abc-123"); @@ -88,7 +88,7 @@ describe("buildInstanceId", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "ACME", name: "Repo.With.Dots" }, - issue: { number: 1, url: "u" }, + issue: { id: 1, number: 1, url: "u" }, requester: { login: "u", externalId: 1 }, }; const id = buildInstanceId(event, "d/e/l"); @@ -100,7 +100,7 @@ describe("buildInstanceId", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { 
owner: "o", name: "a".repeat(100) }, - issue: { number: 1, url: "u" }, + issue: { id: 1, number: 1, url: "u" }, requester: { login: "u", externalId: 1 }, }; const id = buildInstanceId(event, "d"); diff --git a/src/workflows/steps/create-task.test.ts b/src/workflows/steps/create-task.test.ts index 9f6257d..28dddee 100644 --- a/src/workflows/steps/create-task.test.ts +++ b/src/workflows/steps/create-task.test.ts @@ -1,5 +1,6 @@ import { describe, expect, test, vi } from "vitest"; import type { AppConfig } from "../../config/app-config"; +import type { RepoConfig } from "../../config/repo-config-schema"; import type { TaskRequestedEvent } from "../../events/types"; import { TASK_STATUS_COMMENT_MARKER, @@ -25,14 +26,31 @@ const event: TaskRequestedEvent = { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, - issue: { number: 42, url: "https://github.com/acme/repo/issues/42" }, + issue: { + id: 987654, + number: 42, + url: "https://github.com/acme/repo/issues/42", + }, requester: { login: "alice", externalId: 123 }, }; const config = { coderTaskNamePrefix: "gh", + codeFactoryTemplate: "code-factory", } as unknown as AppConfig; +function makeEnv(repoConfig: RepoConfig | null = null) { + const getRepoConfig = vi.fn(async () => repoConfig); + const stub = { getRepoConfig, setRepoConfig: vi.fn(async () => {}) }; + const env = { + REPO_CONFIG_DO: { + idFromName: vi.fn(() => "stub-id"), + get: vi.fn(() => stub), + }, + } as never; + return { env, getRepoConfig }; +} + function makeCoder(overrides: Record = {}) { return { lookupUser: vi.fn(async () => "coder-user"), @@ -62,7 +80,7 @@ function makeGithub(overrides: Record = {}) { } describe("runCreateTask", () => { - test("emits steps in order: check-github-permission (first), lookup-coder-user, create-coder-task, comment-on-issue, wait-*, update-status-comment", async () => { + test("emits steps in order: check-github-permission (first), lookup-coder-user, 
lookup-repo-config, create-coder-task, comment-on-issue, wait-*, update-status-comment", async () => { const step = makeStep(); const coder = makeCoder(); const github = makeGithub(); @@ -73,12 +91,14 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); // With a fast-path `active` observation at pre-poll, waitForTaskActive // emits exactly one step (`wait-lookup-task`). expect(step.calls).toEqual([ "check-github-permission", "lookup-coder-user", + "lookup-repo-config", "create-coder-task", "comment-on-issue", "wait-lookup-task", @@ -98,6 +118,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); // Only the permission check ran. expect(step.calls).toEqual(["check-github-permission"]); @@ -117,6 +138,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); const createIdx = step.do.mock.calls.findIndex( (c: unknown[]) => c[0] === "create-coder-task", @@ -143,6 +165,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); // First commentOnIssue call — the initial "Task created" comment. 
const firstCall = github.commentOnIssue.mock.calls[0] as unknown as [ @@ -183,6 +206,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); expect(step.calls).toContain("update-status-comment"); @@ -223,6 +247,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); expect(step.calls).toContain("update-status-comment"); @@ -256,6 +281,7 @@ describe("runCreateTask", () => { github: github as never, config, event, + env: makeEnv().env, }); expect(github.commentOnIssue).toHaveBeenCalledTimes(2); for (const call of github.commentOnIssue.mock.calls) { @@ -270,4 +296,81 @@ describe("runCreateTask", () => { expect(body.startsWith(TASK_STATUS_COMMENT_MARKER)).toBe(true); } }); + + test("no repo config → legacy prompt (issue URL) and no templateName override", async () => { + const step = makeStep(); + const coder = makeCoder(); + const github = makeGithub(); + await runCreateTask({ + step: step as never, + coder: coder as never, + github: github as never, + config, + event, + env: makeEnv(null).env, + }); + expect(coder.create).toHaveBeenCalledTimes(1); + const call = coder.create.mock.calls[0] as unknown as [ + { taskName: string; owner: string; input: string; templateName?: string }, + ]; + expect(call[0]).toEqual({ + taskName: "gh-repo-42", + owner: "coder-user", + input: "https://github.com/acme/repo/issues/42", + }); + }); + + test("repo config present → codeFactoryTemplate and JSON TemplateInputs prompt", async () => { + const step = makeStep(); + const coder = makeCoder(); + const github = makeGithub(); + const repoConfig: RepoConfig = { + repositoryId: 1, + repositoryFullName: "acme/repo", + installationId: 99, + settings: { + sandbox: { + size: "large", + docker: true, + // Resolved RepoConfigSettings always carries the canonical + // Kubernetes binary-SI size after schema normalization. 
+ volumes: [{ path: "/data", size: "20Gi" }], + }, + harness: { provider: "codex" }, + scheduled_jobs: [], + }, + }; + await runCreateTask({ + step: step as never, + coder: coder as never, + github: github as never, + config, + event, + env: makeEnv(repoConfig).env, + }); + expect(coder.create).toHaveBeenCalledTimes(1); + const call = coder.create.mock.calls[0] as unknown as [ + { taskName: string; owner: string; input: string; templateName: string }, + ]; + const args = call[0]; + expect(args.templateName).toBe("code-factory"); + const parsed = JSON.parse(args.input); + expect(parsed).toEqual({ + repo_url: "https://github.com/acme/repo", + repo_name: "repo", + ai_prompt: [ + "ISSUE_URL: https://github.com/acme/repo/issues/42", + "REPO_OWNER: acme", + "REPO_NAME: repo", + "ISSUE_ID: 987654", + "", + "Use the /coder-task skill to resolve the issue", + "", + ].join("\n"), + ai_provider: "codex", + extra_volumes: [{ path: "/data", size: "20Gi" }], + size: "large", + docker: true, + }); + }); }); diff --git a/src/workflows/steps/create-task.ts b/src/workflows/steps/create-task.ts index 8a9c33e..7b51829 100644 --- a/src/workflows/steps/create-task.ts +++ b/src/workflows/steps/create-task.ts @@ -1,6 +1,8 @@ import type { WorkflowStep } from "cloudflare:workers"; import { generateTaskName } from "../../actions/task-naming"; import type { AppConfig } from "../../config/app-config"; +import type { RepoConfig } from "../../config/repo-config-schema"; +import type { RepoConfigDO } from "../../durable-objects/repo-config-do"; import type { TaskRequestedEvent } from "../../events/types"; import type { CoderService } from "../../services/coder/service"; import type { GitHubClient } from "../../services/github/client"; @@ -10,6 +12,7 @@ import { buildTaskStatusCommentBody, } from "../task-status-comment"; import { waitForTaskActive } from "../wait-for-task-active"; +import { buildTemplateInputs } from "./template-inputs"; export interface RunCreateTaskContext { step: 
WorkflowStep; @@ -17,6 +20,7 @@ export interface RunCreateTaskContext { github: GitHubClient; config: AppConfig; event: TaskRequestedEvent; + env: { REPO_CONFIG_DO: DurableObjectNamespace }; } /** @@ -29,7 +33,7 @@ export interface RunCreateTaskContext { * instances or raw SDK responses. See src/workflows/AGENTS.md. */ export async function runCreateTask(ctx: RunCreateTaskContext): Promise { - const { step, coder, github, config, event } = ctx; + const { step, coder, github, config, event, env } = ctx; const hasPermission = await step.do("check-github-permission", async () => github.checkActorPermission( @@ -59,9 +63,39 @@ export async function runCreateTask(ctx: RunCreateTaskContext): Promise { }), ); - const prompt = event.issue.url; // Default prompt = issue URL + const repoConfig = await step.do( + "lookup-repo-config", + async () => { + const fullName = `${event.repository.owner}/${event.repository.name}`; + const id = env.REPO_CONFIG_DO.idFromName(fullName); + const stub = env.REPO_CONFIG_DO.get(id); + return await stub.getRepoConfig(); + }, + ); + + // When a repo config is present we target the new template (`task-beta`) + // with a JSON `TemplateInputs` payload. Otherwise we fall back to the + // legacy template with the issue URL as a bare prompt. + const { prompt, templateName } = repoConfig + ? { + prompt: JSON.stringify( + buildTemplateInputs({ + repository: event.repository, + issue: { id: event.issue.id, url: event.issue.url }, + settings: repoConfig.settings, + }), + ), + templateName: config.codeFactoryTemplate, + } + : { prompt: event.issue.url, templateName: undefined }; + const created = await step.do("create-coder-task", async () => { - const task = await coder.create({ taskName, owner, input: prompt }); + const task = await coder.create({ + taskName, + owner, + input: prompt, + ...(templateName ? { templateName } : {}), + }); // Scalar projection per spec §4 serialization table. 
`taskId` keeps the // cached step output self-sufficient for any follow-up step that needs // to operate on the task by id without re-querying Coder. diff --git a/src/workflows/steps/template-inputs.ts b/src/workflows/steps/template-inputs.ts new file mode 100644 index 0000000..9f03bbc --- /dev/null +++ b/src/workflows/steps/template-inputs.ts @@ -0,0 +1,74 @@ +import { z } from "zod"; +import type { RepoConfigSettings } from "../../config/repo-config-schema"; + +/** + * Zod schema for the JSON payload sent as `input` to the new Coder template + * (`code-factory`). When a repo has a `.code-factory/config.toml`, the + * workflow serializes an instance of this shape and passes it verbatim as + * the task input; the template parses it on the Terraform side. + */ +export const TemplateInputsSchema = z.object({ + repo_url: z.string(), + base_branch: z.string().optional(), + repo_name: z.string(), + ai_prompt: z.string(), + ai_provider: z.enum(["claude_code", "codex"]), + extra_volumes: z + .array(z.object({ path: z.string(), size: z.string() })) + .optional(), + size: z.enum(["small", "medium", "large"]), + docker: z.boolean(), +}); + +export type TemplateInputs = z.infer; + +export interface BuildTemplateInputsParams { + repository: { owner: string; name: string }; + issue: { id: number; url: string }; + settings: RepoConfigSettings; +} + +/** + * Compose the `ai_prompt` block consumed by the `/coder-task` skill. The + * fields are fixed key/value lines followed by the instruction to invoke the + * skill, separated by a blank line. Trailing newline is intentional. 
+ */ +function buildAiPrompt(params: { + issueUrl: string; + repoOwner: string; + repoName: string; + issueId: number; +}): string { + return `ISSUE_URL: ${params.issueUrl} +REPO_OWNER: ${params.repoOwner} +REPO_NAME: ${params.repoName} +ISSUE_ID: ${params.issueId} + +Use the /coder-task skill to resolve the issue +`; +} + +/** + * Map a (repository, issue, resolved repo config) triple to the JSON payload + * that the new Coder template consumes. Pure — no I/O, safe inside `step.do`. + */ +export function buildTemplateInputs( + params: BuildTemplateInputsParams, +): TemplateInputs { + const { repository, issue, settings } = params; + const volumes = settings.sandbox.volumes; + return { + repo_url: `https://github.com/${repository.owner}/${repository.name}`, + repo_name: repository.name, + ai_prompt: buildAiPrompt({ + issueUrl: issue.url, + repoOwner: repository.owner, + repoName: repository.name, + issueId: issue.id, + }), + ai_provider: settings.harness.provider, + ...(volumes.length > 0 ? 
{ extra_volumes: volumes } : {}), + size: settings.sandbox.size, + docker: settings.sandbox.docker, + }; +} diff --git a/src/workflows/task-runner-workflow.test.ts b/src/workflows/task-runner-workflow.test.ts index 1c1770b..1acfe57 100644 --- a/src/workflows/task-runner-workflow.test.ts +++ b/src/workflows/task-runner-workflow.test.ts @@ -67,7 +67,11 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, - issue: { number: 1, url: "https://github.com/acme/repo/issues/1" }, + issue: { + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); @@ -116,7 +120,11 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, - issue: { number: 1, url: "https://github.com/acme/repo/issues/1" }, + issue: { + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); @@ -183,7 +191,11 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { }, }, repository: { owner: "acme", name: "repo" }, - issue: { number: 1, url: "https://github.com/acme/repo/issues/1" }, + issue: { + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); @@ -247,7 +259,11 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { type: "task_requested", source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, - issue: { number: 1, url: "https://github.com/acme/repo/issues/1" }, + issue: { + id: 1001, 
+ number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); diff --git a/src/workflows/task-runner-workflow.ts b/src/workflows/task-runner-workflow.ts index 1f70d7f..5ca8485 100644 --- a/src/workflows/task-runner-workflow.ts +++ b/src/workflows/task-runner-workflow.ts @@ -96,7 +96,14 @@ export class TaskRunnerWorkflow extends WorkflowEntrypoint< switch (payload.type) { case "task_requested": - await runCreateTask({ step, coder, github, config, event: payload }); + await runCreateTask({ + step, + coder, + github, + config, + event: payload, + env: { REPO_CONFIG_DO: this.env.REPO_CONFIG_DO }, + }); break; case "task_closed": await runCloseTask({ step, coder, github, config, event: payload }); diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl deleted file mode 100644 index 36078d1..0000000 --- a/terraform/.terraform.lock.hcl +++ /dev/null @@ -1,45 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/coder/coder" { - version = "2.15.0" - constraints = ">= 2.12.0, >= 2.13.0" - hashes = [ - "h1:tYNavbEhcqzlIwpSe1GMrV/726+u703m2XGbinj3LPg=", - "zh:10897edfe4ecb975ce11b6b2dfb37317f07c725404d2a60b5fa4e114808259b9", - "zh:10b1af473883a9524353011943cfab89b401fc84ed38608a798e377aaa4ecebf", - "zh:4678c3b329e47a4c3fb9683db4850470e8ef6ede570f6a2bb99701f1125b4215", - "zh:4c2df7c4d8f0fc8546536c886c0984e7173dcc2d3759218fdae3d4bf2703af14", - "zh:72e0b7297f3e20abe2a81e34fe4976caa79691857b6355a2b9492f3ddc85aa9e", - "zh:773077f4eaaf6a31154f1d8aa63b4ef3bbe34104271c4d9cf065261cba8814a9", - "zh:80b1eb2aa2d18ce2ff26e02fa179994fd137031c9c4e2cce0d547b126eadf62e", - "zh:8efdf98494ec442630efb48aabc8dbf10b03254f3f2a2247f519dbf005c5aabc", - "zh:a65d987f531bf0a41cc5d68fd46f675cb37e8570a8a42579bc30e22312b3df4d", - "zh:bb2c57695e801994604542791ff87ed4b7e0d94ffa9d4c6a0ec34260f4616a49", - "zh:be9a5086d498b941e08e9c30b4de5151b15dfab526083387dd47e9451d7bde53", - "zh:de8fe0131db31511c8d4e02b1b58aa2b2bc82ca50188f2ed1d9d731d70321fb2", - "zh:e1d95002571d9025631f9dc98f441e22cd68783a27e9e35925bda21dbd94f904", - "zh:eb0de36ba625d187dce45a24ad9e724bafff821fb466d014cc7d9a02d2d72309", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/kubernetes" { - version = "2.35.1" - constraints = "~> 2.35.0" - hashes = [ - "h1:zgXeWvp4//Ry+4glwNrLMpPFOU8QBQlARNmR9WCNe9o=", - "zh:12212ca5ae47823ce14bfafb909eeb6861faf1e2435fb2fc4a8b334b3544b5f5", - "zh:3f49b3d77182df06b225ab266667de69681c2e75d296867eb2cf06a8f8db768c", - "zh:40832494d19f8a2b3cd0c18b80294d0b23ef6b82f6f6897b5fe00248a9997460", - "zh:739a5ddea61a77925ee7006a29c8717377a2e9d0a79a0bbd98738d92eec12c0d", - "zh:a02b472021753627c5c39447a56d125a32214c29ff9108fc499f2dcdf4f1cc4f", - "zh:b78865b3867065aa266d6758c9601a2756741478f5735a838c20d633d65e085b", - "zh:d362e87464683f5632790e66920ea803adb54c2bc0cb24b6fd9a314d2b1efffd", - 
"zh:d98206fe88c2c9a52b8d2d0cb2c877c812a4a51d19f9d8428e63cbd5fd8a304d", - "zh:dfa320946b1ce3f3615c42b3447a28dc9f604c06d8b9a6fe289855ab2ade4d11", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:fc1debd2e695b5222d2ccc8b24dab65baba4ee2418ecce944e64d42e79474cb5", - "zh:fdaf960443720a238c09e519aeb30faf74f027ac5d1e0a309c3b326888e031d7", - ] -} diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl deleted file mode 100644 index 427121c..0000000 --- a/terraform/.tflint.hcl +++ /dev/null @@ -1,4 +0,0 @@ -plugin "terraform" { - enabled = true - preset = "recommended" -} diff --git a/terraform/README.md b/terraform/README.md deleted file mode 100644 index 82f66e2..0000000 --- a/terraform/README.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -display_name: Task -description: An ephemeral template for autonomous AI coding tasks -icon: ../../../site/static/emojis/1f916.png -maintainer_github: neekolas -verified: true -tags: [] ---- - -# Coder Tasks - -Coder Tasks let you assign a GitHub issue to an AI agent that autonomously resolves it — reading the issue, writing code, running tests, and opening a PR. The agent runs in an ephemeral Coder workspace that is created when the task starts and destroyed when the issue closes. - -## How It Works - -``` -Assign issue to agent user - │ - ▼ -GitHub App creates a Coder workspace - │ - ▼ -Agent reads the issue, explores the repo, writes a spec - │ - ▼ -Agent implements the fix/feature and opens a PR - │ - ▼ -Humans review the PR; comments are forwarded to the agent - │ - ▼ -CI failures are automatically sent to the agent to fix - │ - ▼ -Issue closes → workspace is deleted -``` - -The system has two parts: - -- **This template** provisions an ephemeral Kubernetes pod with Claude Code running in fully autonomous mode. It parses a GitHub issue URL from the task prompt, clones the repo, and instructs the agent to resolve the issue. 
-- **[coder-action](https://github.com/xmtplabs/coder-action)** is a GitHub App that handles the lifecycle — creating workspaces when issues are assigned, forwarding PR comments and CI failures to the agent, and cleaning up when issues close. - -## Adding Tasks to Your Repo - -### Prerequisites - -- A running Coder deployment with this template installed (e.g. [sandbox.xmtp.team](https://sandbox.xmtp.team)) -- A GitHub user account for the agent (e.g. `xmtp-coder-agent`). Already exists. - -### Setup - -Install the [coder-action](https://github.com/xmtplabs/coder-action) GitHub App on your organization (all repos or select repos). The app receives webhooks directly from GitHub — no workflow files are needed in your repository. - -The app handles five event types: - -| Trigger | What happens | -|---------|-------------| -| Issue assigned to agent user | Creates a Coder workspace and starts the task | -| Issue closed | Stops and deletes the workspace | -| Comment on the issue | Forwarded to the running agent | -| Comment on the agent's PR | Forwarded to the running agent | -| CI check fails on agent's PR | Failed job logs are sent to the agent | - -See the [coder-action README](https://github.com/xmtplabs/coder-action/blob/main/README.md) for GitHub App registration, deployment, and configuration details. This is already done for `xmtp` and `xmtplabs` repos. - -### Repository Requirements - -Your repo should have a `.devcontainer/devcontainer.json` so the workspace can build a development environment with the right toolchain. A minimal example: - -```jsonc -// .devcontainer/devcontainer.json -{ - "image": "mcr.microsoft.com/devcontainers/base:ubuntu", - "features": { - "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} - } -} -``` - -## Using Tasks - -### Assigning an Issue - -To start a task, assign a GitHub issue to your agent user (e.g. `xmtp-coder-agent`). The app validates that the assigner has write access to the repo, then creates a workspace. 
- -A comment is posted on the issue with a link to the running task in the Coder dashboard. - -### Interacting with the Agent - -- **Issue comments** — post a comment on the issue to give the agent new instructions or context. The comment is forwarded to the agent's active session. -- **PR review comments** — review the agent's PR as you would any other. Comments are forwarded to the agent, which will attempt to address them. -- **CI failures** — if a monitored workflow fails on the agent's PR, the failed job logs are automatically sent to the agent so it can self-correct. - -### Monitoring Progress - -Open the task link from the issue comment to view the agent's terminal session in the Coder dashboard. You can watch it work in real time. - -### Stopping a Task - -Close the GitHub issue. The app deletes the workspace and frees all resources. - -## What the Agent Does - -Inside the workspace, the agent follows the [coder-task](https://github.com/xmtplabs/code-factory/blob/main/skills/coder-task/SKILL.md) workflow: - -1. Reads the GitHub issue -2. Forks the repo and creates a working branch -3. Explores the codebase to understand relevant code and tests -4. Writes a spec (posted as an issue comment) if the issue doesn't already contain one -5. Decomposes the spec into implementation tasks -6. Implements the changes with tests -7. Opens a PR that references the issue - -## Template Details - -The template provisions: - -- An ephemeral Kubernetes pod (destroyed when the task ends) -- A devcontainer built from the repo's `.devcontainer/devcontainer.json` via [envbuilder](https://github.com/coder/envbuilder) -- Docker-in-Docker sidecar for container builds -- Claude Code in fully autonomous mode with LSP support (Go, Rust, TypeScript) -- 30 GB workspace disk + 1 GB persistent agent state - -Resources: 2 CPU / 8 GB memory guaranteed, burst to 8 CPU / 24 GB. 
diff --git a/terraform/main.tf b/terraform/main.tf deleted file mode 100644 index b459844..0000000 --- a/terraform/main.tf +++ /dev/null @@ -1,355 +0,0 @@ -terraform { - required_providers { - coder = { source = "coder/coder" } - kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } - } -} - -provider "kubernetes" { - # Coder injects cluster credentials via the provisioner's service account -} - -# ─── Variables (baked into the template, not user-facing) ──────────────────── - -variable "claude_code_oauth_token" { - type = string - sensitive = true - description = "Claude Code OAuth token for AI agent authentication" - default = "" -} - -variable "github_pat" { - type = string - sensitive = true - description = "GitHub PAT for a non-org-member service account. Used to fork repos, comment on issues, and create cross-fork PRs." -} - -variable "ai_provider" { - type = string - description = "AI coding agent: claude_code or codex" - default = "claude_code" - - validation { - condition = contains(["claude_code", "codex"], var.ai_provider) - error_message = "ai_provider must be 'claude_code' or 'codex'" - } -} - -variable "codex_auth_token_json" { - type = string - sensitive = true - description = "Base64-encoded Codex auth.json for CI/CD file-based authentication" - default = "" -} - -# ─── Data sources ───────────────────────────────────────────────────────────── - -data "coder_workspace" "me" {} -data "coder_workspace_owner" "me" {} -data "coder_task" "me" {} - -# ─── Locals ─────────────────────────────────────────────────────────────────── - -locals { - use_claude = var.ai_provider == "claude_code" - - # ── Prompt decode ────────────────────────────────────────────────────── - raw_prompt = data.coder_task.me.prompt - parsed = try(jsondecode(local.raw_prompt), null) - json_valid = can(local.parsed.repo_url) && can(local.parsed.repo_name) && can(local.parsed.ai_prompt) - - # ── Required fields (validated in preconditions) ─────────────────────── - 
repo_url = try(local.parsed.repo_url, "") - repo_name = try(local.parsed.repo_name, "") - ai_prompt = try(local.parsed.ai_prompt, "") - - # ── Optional fields (defaults applied here) ──────────────────────────── - base_branch_raw = try(local.parsed.base_branch, null) - base_branch = local.base_branch_raw == null ? "" : local.base_branch_raw - size = try(local.parsed.size, "large") - docker = try(local.parsed.docker, false) - extra_volumes = try(local.parsed.extra_volumes, []) - - # ── Derived ──────────────────────────────────────────────────────────── - work_dir = "/workspaces/${local.repo_name}" - git_url = local.base_branch == "" ? local.repo_url : "${local.repo_url}#refs/heads/${local.base_branch}" - - # ── Resource profiles (dev container) ──────────────────────────────────── - size_profiles = { - small = { - requests = { cpu = "1", memory = "4Gi", "ephemeral-storage" = "10Gi" } - limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "20Gi" } - } - medium = { - requests = { cpu = "1", memory = "4Gi", "ephemeral-storage" = "20Gi" } - limits = { cpu = "8", memory = "12Gi", "ephemeral-storage" = "30Gi" } - } - large = { - requests = { cpu = "2", memory = "8Gi", "ephemeral-storage" = "30Gi" } - limits = { cpu = "8", memory = "24Gi", "ephemeral-storage" = "50Gi" } - } - } - # Fallback keeps the map lookup from erroring before the size precondition - # below can fire with a clean error message. - dev_resources = try(local.size_profiles[local.size], local.size_profiles["large"]) - - # ── dind resources (constant across sizes) ─────────────────────────────── - dind_resources = { - requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "5Gi" } - limits = { cpu = "2", memory = "4Gi", "ephemeral-storage" = "20Gi" } - } - - # ── Extra volumes mapped to workspace-pod module shape ───────────────────── - # PVC name is derived as `replace(trim(path, "/"), "/", "-")` — e.g., - # "/home/runner/cache" → "home-runner-cache", "/cache" → "cache". 
- mapped_extra_volumes = [ - for v in local.extra_volumes : { - name = replace(trim(v.path, "/"), "/", "-") - size = v.size - mount_path = v.path - persistent = true - count = 1 - containers = "dev" - } - ] - - # ── Docker cache volume (gated on local.docker) ──────────────────────────── - docker_cache_volume = local.docker ? [{ - name = "docker-cache" - size = "10Gi" - mount_path = "/var/lib/docker" - persistent = false - count = 1 - containers = "dind" - }] : [] - - # ── Composed volumes list passed to workspace-pod ────────────────────────── - all_volumes = concat(local.docker_cache_volume, local.mapped_extra_volumes) -} - -# ─── Coder Agent ───────────────────────────────────────────────────────────── - -resource "coder_agent" "dev" { - count = data.coder_workspace.me.start_count - arch = "amd64" - auth = "token" - os = "linux" - dir = local.work_dir - connection_timeout = 600 - - lifecycle { - precondition { - condition = local.json_valid - error_message = "data.coder_task.me.prompt must be valid JSON matching the TaskMetadata schema" - } - precondition { - condition = local.repo_url != "" - error_message = "TaskMetadata.repo_url is required and must be non-blank" - } - precondition { - condition = local.repo_name != "" - error_message = "TaskMetadata.repo_name is required and must be non-blank" - } - precondition { - condition = local.ai_prompt != "" - error_message = "TaskMetadata.ai_prompt is required and must be non-blank" - } - precondition { - condition = contains(["small", "medium", "large"], local.size) - error_message = "TaskMetadata.size must be one of 'small', 'medium', 'large'" - } - precondition { - condition = !local.use_claude || var.claude_code_oauth_token != "" - error_message = "claude_code_oauth_token is required when ai_provider is claude_code" - } - precondition { - condition = local.use_claude || var.codex_auth_token_json != "" - error_message = "codex_auth_token_json is required when ai_provider is codex" - } - } - - env = { - 
GITHUB_TOKEN = var.github_pat - } - - startup_script = <<-EOT - # Trust GitHub's SSH host key so git operations don't prompt - mkdir -p ~/.ssh && chmod 700 ~/.ssh - ssh-keyscan -t ed25519 github.com >> ~/.ssh/known_hosts 2>/dev/null - - # Install gh CLI if missing (Debian; https://github.com/cli/cli/blob/trunk/docs/install_linux.md#debian) - if ! command -v gh >/dev/null 2>&1; then - SUDO="" - [ "$(id -u)" -ne 0 ] && SUDO="sudo" - (type -p wget >/dev/null || ($SUDO apt update && $SUDO apt-get install wget -y)) \ - && $SUDO mkdir -p -m 755 /etc/apt/keyrings \ - && out=$(mktemp) && wget -nv -O"$out" https://cli.github.com/packages/githubcli-archive-keyring.gpg \ - && cat "$out" | $SUDO tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \ - && $SUDO chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \ - && $SUDO mkdir -p -m 755 /etc/apt/sources.list.d \ - && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | $SUDO tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ - && $SUDO apt update \ - && $SUDO apt install gh -y - fi - - # Authenticate gh CLI with the baked-in PAT and configure git credentials - echo "$GITHUB_TOKEN" | gh auth login --with-token - gh auth setup-git - - # Configure git identity from the GitHub PAT user - git config --global user.name "$(gh api user --jq .login)" - git config --global user.email "$(gh api user --jq '.id | tostring + "+\(.login)@users.noreply.github.com"')" - EOT - - metadata { - key = "cpu" - display_name = "CPU Usage" - interval = 5 - timeout = 5 - script = "coder stat cpu" - } - - metadata { - key = "memory" - display_name = "Memory Usage" - interval = 5 - timeout = 5 - script = "coder stat mem" - } -} - -# ─── Claude Code ────────────────────────────────────────────────────────���──── - -module "claude-code" { - count = local.use_claude ? 
data.coder_workspace.me.start_count : 0 - source = "./modules/claude-code-agent" - - start_count = 1 - agent_id = coder_agent.dev[0].id - work_dir = local.work_dir - oauth_token = var.claude_code_oauth_token - ai_prompt = local.ai_prompt - -} - -# ─── Codex ─────────────────────────────────────────────────────────────────── - -module "codex" { - count = local.use_claude ? 0 : data.coder_workspace.me.start_count - source = "registry.coder.com/coder-labs/codex/coder" - version = "4.3.1" - agent_id = coder_agent.dev[count.index].id - workdir = local.work_dir - ai_prompt = replace(local.ai_prompt, "/coder-task", "$coder-task") - - pre_install_script = <<-EOT - # Symlink persistent agent state - mkdir -p /persist/agent-state/codex /persist/agent-state/codex-module - ln -sfn /persist/agent-state/codex "$HOME/.codex" - ln -sfn /persist/agent-state/codex-module "$HOME/.codex-module" - - # Install code-factory plugin for Codex - git clone --depth 1 https://github.com/xmtplabs/code-factory.git /tmp/code-factory - - mkdir -p ~/.agents/skills - cp -R /tmp/code-factory/skills/* ~/.agents/skills/ - - mkdir -p ~/.codex/agents - cp -R /tmp/code-factory/.codex/agents/* ~/.codex/agents/ - - mkdir -p ~/.agents/plugins/plugins - cp -R /tmp/code-factory ~/.agents/plugins/plugins/code-factory - cat > ~/.agents/plugins/marketplace.json <<'MKJSON' - { - "name": "personal-plugins", - "interface": { - "displayName": "Personal Plugins" - }, - "plugins": [ - { - "name": "code-factory", - "source": { - "source": "local", - "path": "./plugins/code-factory" - }, - "policy": { - "installation": "INSTALLED_BY_DEFAULT", - "authentication": "ON_INSTALL" - }, - "category": "Development" - } - ] - } - MKJSON - - rm -rf /tmp/code-factory - EOT - - post_install_script = <<-EOT - echo -n '${var.codex_auth_token_json}' | base64 -d > "$HOME/.codex/auth.json" - chmod 600 "$HOME/.codex/auth.json" - EOT - - base_config_toml = <<-EOT - sandbox_mode = "danger-full-access" - approval_policy = "never" - 
cli_auth_credentials_store = "file" - [projects."${local.work_dir}"] - trust_level = "trusted" - EOT -} - -# ─── Workspace Pod ──────────────────────────────────��──────────────────────── - -module "workspace" { - source = "./modules/workspace-pod" - - workspace_name = data.coder_workspace.me.name - workspace_id = data.coder_workspace.me.id - start_count = data.coder_workspace.me.start_count - owner_name = data.coder_workspace_owner.me.full_name - owner_email = data.coder_workspace_owner.me.email - owner_username = data.coder_workspace_owner.me.name - - agent_token = try(coder_agent.dev[0].token, "") - agent_init_script = try(coder_agent.dev[0].init_script, "") - access_url = data.coder_workspace.me.access_url - - deployment_type = "pod" - restart_policy = "Never" - termination_grace_period_seconds = 30 - do_not_disrupt = true - docker_enabled = local.docker - git_url = local.git_url - workspace_size = "30Gi" - app_name = "coder-task" - name_prefix = "task" - - dev_resources = local.dev_resources - dind_resources = local.dind_resources - - volumes = local.all_volumes -} - -# ─── AI Task ───────────────────────────────────────────────────────���───────── - -resource "coder_ai_task" "task" { - count = data.coder_workspace.me.start_count - app_id = local.use_claude ? 
module.claude-code[0].task_app_id : module.codex[0].task_app_id -} - -# ─── Dashboard metadata ─────────────────────────────��──────────────────────── - -resource "coder_metadata" "task_info" { - count = data.coder_workspace.me.start_count - resource_id = coder_agent.dev[count.index].id - - item { - key = "pod" - value = module.workspace.pod_name - } - item { - key = "repo" - value = local.repo_url - } -} diff --git a/terraform/modules/claude-code-agent/.terraform.lock.hcl b/terraform/modules/claude-code-agent/.terraform.lock.hcl deleted file mode 100644 index da7746b..0000000 --- a/terraform/modules/claude-code-agent/.terraform.lock.hcl +++ /dev/null @@ -1,25 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/coder/coder" { - version = "2.15.0" - constraints = ">= 2.12.0, >= 2.13.0" - hashes = [ - "h1:tYNavbEhcqzlIwpSe1GMrV/726+u703m2XGbinj3LPg=", - "zh:10897edfe4ecb975ce11b6b2dfb37317f07c725404d2a60b5fa4e114808259b9", - "zh:10b1af473883a9524353011943cfab89b401fc84ed38608a798e377aaa4ecebf", - "zh:4678c3b329e47a4c3fb9683db4850470e8ef6ede570f6a2bb99701f1125b4215", - "zh:4c2df7c4d8f0fc8546536c886c0984e7173dcc2d3759218fdae3d4bf2703af14", - "zh:72e0b7297f3e20abe2a81e34fe4976caa79691857b6355a2b9492f3ddc85aa9e", - "zh:773077f4eaaf6a31154f1d8aa63b4ef3bbe34104271c4d9cf065261cba8814a9", - "zh:80b1eb2aa2d18ce2ff26e02fa179994fd137031c9c4e2cce0d547b126eadf62e", - "zh:8efdf98494ec442630efb48aabc8dbf10b03254f3f2a2247f519dbf005c5aabc", - "zh:a65d987f531bf0a41cc5d68fd46f675cb37e8570a8a42579bc30e22312b3df4d", - "zh:bb2c57695e801994604542791ff87ed4b7e0d94ffa9d4c6a0ec34260f4616a49", - "zh:be9a5086d498b941e08e9c30b4de5151b15dfab526083387dd47e9451d7bde53", - "zh:de8fe0131db31511c8d4e02b1b58aa2b2bc82ca50188f2ed1d9d731d70321fb2", - "zh:e1d95002571d9025631f9dc98f441e22cd68783a27e9e35925bda21dbd94f904", - "zh:eb0de36ba625d187dce45a24ad9e724bafff821fb466d014cc7d9a02d2d72309", - 
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} diff --git a/terraform/modules/claude-code-agent/main.tf b/terraform/modules/claude-code-agent/main.tf deleted file mode 100644 index a379d45..0000000 --- a/terraform/modules/claude-code-agent/main.tf +++ /dev/null @@ -1,61 +0,0 @@ -terraform { - required_providers { - coder = { source = "coder/coder" } - } -} - -# ─── Locals ────────────────────────────────────────────────────────────────── - -locals { - enabled_plugins = { for p in var.plugins : p => true } - - marketplace_map = { for m in var.marketplaces : split("/", m)[1] => { - source = { source = "github", repo = m } - } } - - settings = jsonencode({ - permissions = { - defaultMode = "bypassPermissions" - } - enableRemoteControl = true - skipDangerousModePermissionPrompt = true - extraKnownMarketplaces = local.marketplace_map - enabledPlugins = local.enabled_plugins - }) - - plugin_install_commands = join("\n", [for p in var.plugins : "claude plugin install ${p} --scope user"]) - - post_install_script = join("\n", compact([ - var.post_install_script, - local.plugin_install_commands, - ])) -} - -# ─── Claude Code Module ────────────────────────────────────────────────────── - -module "claude-code" { - count = var.start_count - source = "registry.coder.com/coder/claude-code/coder" - version = "4.9.1" - agent_id = var.agent_id - workdir = var.work_dir - model = "opus" - claude_code_oauth_token = var.oauth_token - - pre_install_script = <<-EOT - # Symlink persistent agent state into $HOME so it survives workspace restarts - mkdir -p /persist/agent-state/claude /persist/agent-state/claude-module - ln -sfn /persist/agent-state/claude "$HOME/.claude" - ln -sfn /persist/agent-state/claude-module "$HOME/.claude-module" - - # Configure Claude Code settings - mkdir -p ~/.claude - cat > ~/.claude/settings.json <<'SETTINGS' - ${local.settings} - SETTINGS - EOT - - post_install_script = local.post_install_script - ai_prompt = var.ai_prompt - mcp 
= var.mcp != "" ? var.mcp : null -} diff --git a/terraform/modules/claude-code-agent/outputs.tf b/terraform/modules/claude-code-agent/outputs.tf deleted file mode 100644 index 7f5b4e5..0000000 --- a/terraform/modules/claude-code-agent/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "task_app_id" { - description = "Claude Code task app ID (for coder_ai_task)" - value = length(module.claude-code) > 0 ? module.claude-code[0].task_app_id : "" -} diff --git a/terraform/modules/claude-code-agent/variables.tf b/terraform/modules/claude-code-agent/variables.tf deleted file mode 100644 index ce52892..0000000 --- a/terraform/modules/claude-code-agent/variables.tf +++ /dev/null @@ -1,57 +0,0 @@ -variable "start_count" { - type = number - description = "data.coder_workspace.me.start_count" -} - -variable "agent_id" { - type = string - description = "coder_agent resource ID" -} - -variable "work_dir" { - type = string - description = "Working directory for Claude Code" -} - -variable "oauth_token" { - type = string - sensitive = true - description = "Claude Code OAuth token" -} - -variable "ai_prompt" { - type = string - description = "Optional AI prompt. 
When set, enables autonomous mode (skipDangerousModePermissionPrompt)" - default = "" -} - -variable "marketplaces" { - type = list(string) - description = "Extra known marketplaces added to settings.json" - default = ["xmtplabs/code-factory"] -} - -variable "plugins" { - type = list(string) - description = "Plugins to install via 'claude plugin install --scope user' and enable in settings" - default = [ - "code-factory@code-factory", - "ralph-loop@claude-plugins-official", - "code-simplifier@claude-plugins-official", - "rust-analyzer-lsp@claude-plugins-official", - "gopls-lsp@claude-plugins-official", - "typescript-lsp@claude-plugins-official", - ] -} - -variable "mcp" { - type = string - description = "MCP server configuration JSON string passed to the Claude Code module" - default = "" -} - -variable "post_install_script" { - type = string - description = "Script to run after Claude Code and plugin installation" - default = "" -} diff --git a/terraform/modules/workspace-pod/main.tf b/terraform/modules/workspace-pod/main.tf deleted file mode 100644 index f3f1ec1..0000000 --- a/terraform/modules/workspace-pod/main.tf +++ /dev/null @@ -1,521 +0,0 @@ -terraform { - required_providers { - kubernetes = { source = "hashicorp/kubernetes", version = "~> 2.35.0" } - } -} - -# ─── Locals ────────────────────────────────────────────────────────────────── - -locals { - hostname = lower(var.workspace_name) - ws_id = substr(var.workspace_id, 0, 8) - slug = "${local.hostname}-${local.ws_id}" - - labels = { - "app.kubernetes.io/name" = var.app_name - "app.kubernetes.io/instance" = local.hostname - "app.kubernetes.io/managed-by" = "coder" - } - - annotations = { - "coder.com/owner-name" = var.owner_name - "coder.com/owner-email" = var.owner_email - "coder.com/owner" = var.owner_username - "coder.com/workspace" = var.workspace_name - } - - # Standard setup script + optional caller additions - setup_script = join("; ", compact([ - "chown -R 1000:1000 /workspaces 
/persist/agent-state", - var.setup_script, - ])) - - # Init script: drop to UID 1000 if root, then run agent init - init_script = join(" && ", [ - "echo ${base64encode(var.agent_init_script)} | base64 -d > /tmp/init.sh", - "chmod +x /tmp/init.sh", - "if [ \"$(id -u)\" = \"0\" ]; then export HOME=$(getent passwd 1000 | cut -d: -f6); exec setpriv --reuid=1000 --regid=1000 --init-groups /bin/bash /tmp/init.sh; else exec /bin/bash /tmp/init.sh; fi", - ]) - - # Filter volumes by count > 0 - active_volumes = [for v in var.volumes : v if v.count > 0] - - # Volumes for dev container (all standard + volumes with containers "dev" or "both") - dev_extra_mounts = [for v in local.active_volumes : v if contains(["dev", "both"], v.containers)] - dind_extra_mounts = [for v in local.active_volumes : v if contains(["dind", "both"], v.containers)] - - pod_annotations = var.do_not_disrupt ? merge(local.annotations, { - "karpenter.sh/do-not-disrupt" = "true" - }) : local.annotations -} - -# ─── Standard PVCs ─────────────────────────────────────────────────────────── - -resource "kubernetes_persistent_volume_claim_v1" "workspace" { - metadata { - name = "${var.name_prefix}-workspace-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = var.workspace_size - } - } - } - - wait_until_bound = false - - lifecycle { - ignore_changes = [spec[0].resources[0].requests] - } -} - -resource "kubernetes_persistent_volume_claim_v1" "agent_state" { - metadata { - name = "${var.name_prefix}-agent-state-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = "1Gi" - } - } - } - - wait_until_bound = false - - lifecycle { - ignore_changes = [spec[0].resources[0].requests] - } -} - -# 
─── Extra PVCs (persistent volumes from var.volumes) ──────────────────────── - -resource "kubernetes_persistent_volume_claim_v1" "extra" { - for_each = { for v in local.active_volumes : v.name => v if v.persistent } - - metadata { - name = "${each.key}-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.annotations - } - - spec { - access_modes = ["ReadWriteOnce"] - storage_class_name = "gp3" - - resources { - requests = { - storage = each.value.size - } - } - } - - wait_until_bound = false -} - -# ─── Deployment (long-lived workspaces) ────────────────────────────────────── - -resource "kubernetes_deployment_v1" "workspace" { - count = var.deployment_type == "deployment" ? var.start_count : 0 - - metadata { - name = "${var.name_prefix}-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.labels - } - - timeouts { - create = "15m" - } - - wait_for_rollout = true - - spec { - replicas = 1 - - strategy { - type = "Recreate" - } - - selector { - match_labels = local.labels - } - - template { - metadata { - labels = local.labels - annotations = local.pod_annotations - } - - spec { - affinity { - node_affinity { - preferred_during_scheduling_ignored_during_execution { - weight = 100 - preference { - match_expressions { - key = "role" - operator = "In" - values = ["workspace"] - } - } - } - } - } - - # ── Dev container ────────────────────────────────────────────── - container { - name = "dev" - image = "ghcr.io/coder/envbuilder:1.3.0" - - env { - name = "ENVBUILDER_GIT_URL" - value = var.git_url - } - env { - name = "ENVBUILDER_SKIP_REBUILD" - value = "false" - } - env { - name = "ENVBUILDER_FALLBACK_IMAGE" - value = "codercom/enterprise-base:ubuntu" - } - env { - name = "ENVBUILDER_SETUP_SCRIPT" - value = local.setup_script - } - env { - name = "ENVBUILDER_INIT_SCRIPT" - value = local.init_script - } - env { - name = "CODER_AGENT_TOKEN" - value = var.agent_token - } - env { - name = "CODER_AGENT_URL" - value 
= var.access_url - } - dynamic "env" { - for_each = var.docker_enabled ? [1] : [] - content { - name = "DOCKER_HOST" - value = "tcp://localhost:2375" - } - } - env { - name = "ENVBUILDER_CACHE_REPO" - value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" - } - env { - name = "ENVBUILDER_INSECURE" - value = "true" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - volume_mount { - name = "agent-state" - mount_path = "/persist/agent-state" - } - - dynamic "volume_mount" { - for_each = local.dev_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = var.dev_resources.requests - limits = var.dev_resources.limits - } - } - - # ── DinD sidecar ─────────────────────────────────────────────── - dynamic "container" { - for_each = var.docker_enabled ? [1] : [] - content { - name = "dind" - image = "docker:27-dind" - - security_context { - privileged = true - } - - env { - name = "DOCKER_TLS_CERTDIR" - value = "" - } - - port { - container_port = 2375 - protocol = "TCP" - } - - # Always mount workspace for docker-compose bind mounts - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - dynamic "volume_mount" { - for_each = local.dind_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = var.dind_resources.requests - limits = var.dind_resources.limits - } - } - } - - # ── Standard volumes ───────────────────────────────────────── - volume { - name = "workspace" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name - } - } - - volume { - name = "agent-state" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name - } - } - - # ── Extra persistent volumes ───────────────────────────────── - dynamic "volume" { - for_each = { for v in 
local.active_volumes : v.name => v if v.persistent } - content { - name = volume.key - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim_v1.extra[volume.key].metadata[0].name - } - } - } - - # ── Extra ephemeral volumes ────────────────────────────────── - dynamic "volume" { - for_each = { for v in local.active_volumes : v.name => v if !v.persistent } - content { - name = volume.key - empty_dir { - size_limit = volume.value.size - } - } - } - } - } - } -} - -# ─── Pod (ephemeral tasks) ─────────────────────────────────────────────────── - -resource "kubernetes_pod_v1" "workspace" { - count = var.deployment_type == "pod" ? var.start_count : 0 - - metadata { - name = "${var.name_prefix}-${local.slug}" - namespace = "coder" - labels = local.labels - annotations = local.pod_annotations - } - - spec { - affinity { - node_affinity { - preferred_during_scheduling_ignored_during_execution { - weight = 100 - preference { - match_expressions { - key = "role" - operator = "In" - values = ["workspace"] - } - } - } - } - } - - restart_policy = var.restart_policy - termination_grace_period_seconds = var.termination_grace_period_seconds - - # ── Dev container ────────────────────────────────────────────── - container { - name = "dev" - image = "ghcr.io/coder/envbuilder:1.3.0" - - env { - name = "ENVBUILDER_GIT_URL" - value = var.git_url - } - env { - name = "ENVBUILDER_SKIP_REBUILD" - value = "false" - } - env { - name = "ENVBUILDER_FALLBACK_IMAGE" - value = "codercom/enterprise-base:ubuntu" - } - env { - name = "ENVBUILDER_SETUP_SCRIPT" - value = local.setup_script - } - env { - name = "ENVBUILDER_INIT_SCRIPT" - value = local.init_script - } - env { - name = "CODER_AGENT_TOKEN" - value = var.agent_token - } - env { - name = "CODER_AGENT_URL" - value = var.access_url - } - dynamic "env" { - for_each = var.docker_enabled ? 
[1] : [] - content { - name = "DOCKER_HOST" - value = "tcp://localhost:2375" - } - } - env { - name = "ENVBUILDER_CACHE_REPO" - value = "envbuilder-registry.coder.svc.cluster.local:5000/envbuilder-cache" - } - env { - name = "ENVBUILDER_INSECURE" - value = "true" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - volume_mount { - name = "agent-state" - mount_path = "/persist/agent-state" - } - - dynamic "volume_mount" { - for_each = local.dev_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = var.dev_resources.requests - limits = var.dev_resources.limits - } - } - - # ── DinD sidecar ─────────────────────────────────────────────── - dynamic "container" { - for_each = var.docker_enabled ? [1] : [] - content { - name = "dind" - image = "docker:27-dind" - - security_context { - privileged = true - } - - env { - name = "DOCKER_TLS_CERTDIR" - value = "" - } - - port { - container_port = 2375 - protocol = "TCP" - } - - volume_mount { - name = "workspace" - mount_path = "/workspaces" - } - - dynamic "volume_mount" { - for_each = local.dind_extra_mounts - content { - name = volume_mount.value.name - mount_path = volume_mount.value.mount_path - } - } - - resources { - requests = var.dind_resources.requests - limits = var.dind_resources.limits - } - } - } - - # ── Standard volumes ───────────────────────────────────────── - volume { - name = "workspace" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim_v1.workspace.metadata[0].name - } - } - - volume { - name = "agent-state" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim_v1.agent_state.metadata[0].name - } - } - - # ── Extra persistent volumes ───────────────────────────────── - dynamic "volume" { - for_each = { for v in local.active_volumes : v.name => v if v.persistent } - content { - name = volume.key - persistent_volume_claim { - claim_name = 
kubernetes_persistent_volume_claim_v1.extra[volume.key].metadata[0].name - } - } - } - - # ── Extra ephemeral volumes ────────────────────────────────── - dynamic "volume" { - for_each = { for v in local.active_volumes : v.name => v if !v.persistent } - content { - name = volume.key - empty_dir { - size_limit = volume.value.size - } - } - } - } -} diff --git a/terraform/modules/workspace-pod/outputs.tf b/terraform/modules/workspace-pod/outputs.tf deleted file mode 100644 index 209d1f2..0000000 --- a/terraform/modules/workspace-pod/outputs.tf +++ /dev/null @@ -1,36 +0,0 @@ -output "pod_name" { - description = "Name of the created pod or deployment" - value = var.deployment_type == "deployment" ? ( - length(kubernetes_deployment_v1.workspace) > 0 ? kubernetes_deployment_v1.workspace[0].metadata[0].name : "" - ) : ( - length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].metadata[0].name : "" - ) -} - -# Test-introspection outputs. Internal contract — consumed only by -# terraform/tests/*.tftest.hcl, not by the root module's production path. - -output "docker_enabled" { - description = "Test-only. Echoes var.docker_enabled so root-level tests can assert on the value the module received." - value = var.docker_enabled -} - -# Test-introspection only. Consumed by terraform/tests/*.tftest.hcl. -# Deliberately redacted projection — ONLY container names and env VAR NAMES -# are exposed. Env values are omitted because they include -# CODER_AGENT_TOKEN, GITHUB_TOKEN, and other sensitive strings; re-exporting -# those via an output would print them in plaintext in CI logs and -# `terraform output` listings. -output "pod_containers" { - description = "Test-only. Redacted container list: name + env var names only. Not a production contract." - value = [ - for c in(var.deployment_type == "pod" ? ( - length(kubernetes_pod_v1.workspace) > 0 ? kubernetes_pod_v1.workspace[0].spec[0].container : [] - ) : ( - length(kubernetes_deployment_v1.workspace) > 0 ? 
kubernetes_deployment_v1.workspace[0].spec[0].template[0].spec[0].container : [] - )) : { - name = c.name - env = [for e in c.env : { name = e.name }] - } - ] -} diff --git a/terraform/modules/workspace-pod/variables.tf b/terraform/modules/workspace-pod/variables.tf deleted file mode 100644 index e5e1fec..0000000 --- a/terraform/modules/workspace-pod/variables.tf +++ /dev/null @@ -1,162 +0,0 @@ -# ─── Identity ──────────────────────────────────────────────────────────────── - -variable "workspace_name" { - type = string - description = "Coder workspace name" -} - -variable "workspace_id" { - type = string - description = "Coder workspace ID" -} - -variable "start_count" { - type = number - description = "data.coder_workspace.me.start_count — controls whether pod/deployment is created" -} - -variable "owner_name" { - type = string - description = "Workspace owner full name" -} - -variable "owner_email" { - type = string - description = "Workspace owner email" -} - -variable "owner_username" { - type = string - description = "Workspace owner username" -} - -# ─── Agent ─────────────────────────────────────────────────────────────────── - -variable "agent_token" { - type = string - sensitive = true - description = "Coder agent token" -} - -variable "agent_init_script" { - type = string - description = "Coder agent init_script (base64-encoded and executed via setpriv)" - default = "" -} - -variable "access_url" { - type = string - description = "Coder access URL" -} - -# ─── Workload type ─────────────────────────────────────────────────────────── - -variable "deployment_type" { - type = string - description = "Kubernetes workload type: 'deployment' or 'pod'" - default = "deployment" - - validation { - condition = contains(["deployment", "pod"], var.deployment_type) - error_message = "deployment_type must be 'deployment' or 'pod'" - } -} - -variable "restart_policy" { - type = string - description = "Pod restart policy (only used when deployment_type = 'pod')" - 
default = "Always" -} - -variable "termination_grace_period_seconds" { - type = number - description = "Termination grace period (only used when deployment_type = 'pod')" - default = 30 -} - -variable "do_not_disrupt" { - type = bool - description = "Add karpenter.sh/do-not-disrupt annotation" - default = false -} - -variable "docker_enabled" { - type = bool - description = "When false, the dind sidecar, DOCKER_HOST env on dev, and any containers=\"dind\" or \"both\" volume mounts are omitted from the rendered pod spec." - default = true -} - -# ─── Git / envbuilder ──────────────────────────────────────────────────────── - -variable "git_url" { - type = string - description = "Git URL for envbuilder (may include #refs/heads/branch suffix)" -} - -variable "setup_script" { - type = string - description = "Additional setup script lines appended after the standard chown" - default = "" -} - -# ─── Resource profiles ─────────────────────────────────────────────────────── - -variable "dev_resources" { - type = object({ - requests = map(string) - limits = map(string) - }) - description = "Resource requests/limits for the dev (envbuilder) container" - default = { - requests = { cpu = "500m", memory = "8Gi", "ephemeral-storage" = "1Gi" } - limits = { cpu = "16", memory = "32Gi", "ephemeral-storage" = "10Gi" } - } -} - -variable "dind_resources" { - type = object({ - requests = map(string) - limits = map(string) - }) - description = "Resource requests/limits for the dind sidecar" - default = { - requests = { cpu = "250m", memory = "1Gi", "ephemeral-storage" = "1Gi" } - limits = { cpu = "4", memory = "8Gi", "ephemeral-storage" = "10Gi" } - } -} - -# ─── Storage ───────────────────────────────────────────────────────────────── - -variable "workspace_size" { - type = string - description = "Size of the workspace PVC (e.g. 
'10Gi', '30Gi')" - default = "10Gi" -} - -variable "volumes" { - type = list(object({ - name = string - size = string - mount_path = string - persistent = optional(bool, true) - count = optional(number, 1) - # Which containers get this mount: "dev", "dind", or "both" - containers = optional(string, "dev") - })) - description = "Additional volumes beyond workspace and agent-state" - default = [] -} - -# ─── Labels ────────────────────────────────────────────────────────────────── - -variable "app_name" { - type = string - description = "Value for app.kubernetes.io/name label" - default = "coder-workspace" -} - -variable "name_prefix" { - type = string - description = "Prefix for Kubernetes resource names" - default = "workspace" -} diff --git a/terraform/outputs.tf b/terraform/outputs.tf deleted file mode 100644 index 4a18ebe..0000000 --- a/terraform/outputs.tf +++ /dev/null @@ -1,38 +0,0 @@ -# ─── Test-introspection outputs ────────────────────────────────────────────── -# Consumed by terraform/tests/*.tftest.hcl. Every key uses try() so this file -# stays valid as later phases introduce additional locals. 
- -output "task_metadata" { - value = { - repo_url = try(local.repo_url, "") - repo_name = try(local.repo_name, "") - ai_prompt = try(local.ai_prompt, "") - base_branch = try(local.base_branch, "") - size = try(local.size, "") - docker = try(local.docker, false) - extra_volumes = try(local.extra_volumes, []) - work_dir = try(local.work_dir, "") - git_url = try(local.git_url, "") - json_valid = try(local.json_valid, false) - } -} - -output "dev_resources" { - value = try(local.dev_resources, null) -} - -output "dind_resources" { - value = try(local.dind_resources, null) -} - -output "docker_enabled" { - value = try(module.workspace.docker_enabled, null) -} - -output "all_volumes" { - value = try(local.all_volumes, []) -} - -output "mapped_extra_volumes" { - value = try(local.mapped_extra_volumes, []) -} diff --git a/terraform/tests/task-metadata.tftest.hcl b/terraform/tests/task-metadata.tftest.hcl deleted file mode 100644 index baf4a65..0000000 --- a/terraform/tests/task-metadata.tftest.hcl +++ /dev/null @@ -1,804 +0,0 @@ -# ─── Mock providers ────────────────────────────────────────────────────────── - -mock_provider "coder" { - override_data { - target = data.coder_workspace.me - values = { - start_count = 0 - name = "test" - id = "00000000-0000-0000-0000-000000000000" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_workspace_owner.me - values = { - full_name = "Test User" - email = "test@example.test" - name = "test" - } - } -} - -mock_provider "kubernetes" {} - -# ─── Shared variables ──────────────────────────────────────────────────────── - -variables { - claude_code_oauth_token = "fake-oauth-token" - github_pat = "fake-pat" -} - -# ─── Smoke test ────────────────────────────────────────────────────────────── - -run "golden_path_parses" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000000" - access_url = 
"https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" - } - } - - assert { - condition = output.task_metadata.repo_url == "https://github.com/acme/widget" - error_message = "repo_url did not round-trip from JSON prompt" - } - assert { - condition = output.task_metadata.json_valid == true - error_message = "json_valid should be true for a well-formed JSON prompt" - } -} - -# ─── Precondition firing ───────────────────────────────────────────────────── -# -# Each fixture below violates EXACTLY ONE precondition. Other required fields -# remain non-blank and the JSON remains structurally valid so the NAMED -# precondition is the one that trips — not a sibling. When adding new -# preconditions or reordering the `lifecycle.precondition` list in -# terraform/main.tf, update these fixtures in lockstep; otherwise -# `expect_failures` may pass for the wrong reason. - -run "invalid_json_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000001" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { prompt = "not-json" } - } - - expect_failures = [resource.coder_agent.dev] -} - -run "non_object_json_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000008" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { prompt = "[1,2,3]" } - } - - # Valid JSON but not a TaskMetadata object. json_valid must be false so - # EARS-1 trips (not EARS-2 via try() returning ""). Guards the can()-based - # tightening of local.json_valid in terraform/main.tf against regression. 
- expect_failures = [resource.coder_agent.dev] -} - -run "blank_repo_url_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000002" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { prompt = "{\"repo_url\":\"\",\"repo_name\":\"x\",\"ai_prompt\":\"y\"}" } - } - - expect_failures = [resource.coder_agent.dev] -} - -run "blank_repo_name_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000003" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { prompt = "{\"repo_url\":\"https://github.com/a/b\",\"repo_name\":\"\",\"ai_prompt\":\"y\"}" } - } - - expect_failures = [resource.coder_agent.dev] -} - -run "blank_ai_prompt_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000004" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { prompt = "{\"repo_url\":\"https://github.com/a/b\",\"repo_name\":\"b\",\"ai_prompt\":\"\"}" } - } - - expect_failures = [resource.coder_agent.dev] -} - -# ─── Defaults and derivations ──────────────────────────────────────────────── - -run "defaults_applied_when_optionals_absent" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000005" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" - } - } - - assert { - condition = 
output.task_metadata.size == "large" - error_message = "size must default to 'large' when absent (EARS-6)" - } - assert { - condition = output.task_metadata.docker == false - error_message = "docker must default to false when absent" - } - assert { - condition = output.task_metadata.base_branch == "" - error_message = "base_branch must default to empty string when absent" - } - assert { - condition = length(output.task_metadata.extra_volumes) == 0 - error_message = "extra_volumes must default to empty list when absent" - } - assert { - condition = output.task_metadata.work_dir == "/workspaces/widget" - error_message = "work_dir must be /workspaces/ (EARS-16)" - } - assert { - condition = output.task_metadata.git_url == "https://github.com/acme/widget" - error_message = "git_url must equal repo_url when base_branch is empty (EARS-14)" - } -} - -run "base_branch_composes_git_url_suffix" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000006" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\",\"base_branch\":\"feature-x\"}" - } - } - - assert { - condition = output.task_metadata.git_url == "https://github.com/acme/widget#refs/heads/feature-x" - error_message = "git_url must append #refs/heads/ when base_branch is set (EARS-14)" - } -} - -run "ai_prompt_passthrough_no_wrapping" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000007" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"LITERAL_PROMPT_TOKEN\"}" - } - } - - assert { - 
condition = output.task_metadata.ai_prompt == "LITERAL_PROMPT_TOKEN" - error_message = "ai_prompt must be passed through verbatim, no template wrapping (EARS-15)" - } -} - -# ─── Dashboard metadata ────────────────────────────────────────────────────── - -run "coder_metadata_exposes_repo_url" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000017" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"Do the thing\"}" - } - } - - assert { - condition = length([for i in coder_metadata.task_info[0].item : i if i.key == "repo" && i.value == "https://github.com/acme/widget"]) == 1 - error_message = "coder_metadata.task_info must expose repo_url via a 'repo' item (EARS-17)" - } -} - -# ─── Size profiles ─────────────────────────────────────────────────────────── - -run "size_default_large_profile" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000009" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" - } - } - - assert { - condition = output.dev_resources.requests.cpu == "2" - error_message = "default size (absent) must apply the large profile — requests.cpu (EARS-6, EARS-9)" - } - assert { - condition = output.dev_resources.requests.memory == "8Gi" - error_message = "default (large) requests.memory (EARS-9)" - } - assert { - condition = output.dev_resources.requests["ephemeral-storage"] == "30Gi" - error_message = "default (large) requests.ephemeral-storage (EARS-9)" - } - assert { - condition = output.dev_resources.limits.cpu == "8" - 
error_message = "default (large) limits.cpu (EARS-9)" - } - assert { - condition = output.dev_resources.limits.memory == "24Gi" - error_message = "default (large) limits.memory (EARS-9)" - } - assert { - condition = output.dev_resources.limits["ephemeral-storage"] == "50Gi" - error_message = "default (large) limits.ephemeral-storage (EARS-9)" - } -} - -run "size_small_profile" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000010" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"small\"}" - } - } - - assert { - condition = output.dev_resources.requests.cpu == "1" && output.dev_resources.requests.memory == "4Gi" && output.dev_resources.requests["ephemeral-storage"] == "10Gi" - error_message = "small profile requests mismatch (EARS-7): expected {cpu=1, memory=4Gi, ephemeral-storage=10Gi}" - } - assert { - condition = output.dev_resources.limits.cpu == "4" && output.dev_resources.limits.memory == "8Gi" && output.dev_resources.limits["ephemeral-storage"] == "20Gi" - error_message = "small profile limits mismatch (EARS-7): expected {cpu=4, memory=8Gi, ephemeral-storage=20Gi}" - } - # EARS-10: dind resources must be identical across all sizes. 
- assert { - condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" - error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" - error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" - } -} - -run "size_medium_profile" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000011" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"medium\"}" - } - } - - assert { - condition = output.dev_resources.requests.cpu == "1" && output.dev_resources.requests.memory == "4Gi" && output.dev_resources.requests["ephemeral-storage"] == "20Gi" - error_message = "medium profile requests mismatch (EARS-8): expected {cpu=1, memory=4Gi, ephemeral-storage=20Gi}" - } - assert { - condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "12Gi" && output.dev_resources.limits["ephemeral-storage"] == "30Gi" - error_message = "medium profile limits mismatch (EARS-8): expected {cpu=8, memory=12Gi, ephemeral-storage=30Gi}" - } - # EARS-10: dind resources must be identical across all sizes. 
- assert { - condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" - error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" - error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" - } -} - -run "size_large_profile_explicit" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000012" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"large\"}" - } - } - - assert { - condition = output.dev_resources.requests.cpu == "2" && output.dev_resources.requests.memory == "8Gi" && output.dev_resources.requests["ephemeral-storage"] == "30Gi" - error_message = "explicit large profile requests mismatch (EARS-9)" - } - assert { - condition = output.dev_resources.limits.cpu == "8" && output.dev_resources.limits.memory == "24Gi" && output.dev_resources.limits["ephemeral-storage"] == "50Gi" - error_message = "explicit large profile limits mismatch (EARS-9)" - } - # EARS-10: dind resources must be identical across all sizes. 
- assert { - condition = output.dind_resources.requests.cpu == "250m" && output.dind_resources.requests.memory == "1Gi" && output.dind_resources.requests["ephemeral-storage"] == "5Gi" - error_message = "dind requests must be {cpu=250m, memory=1Gi, ephemeral-storage=5Gi} across all sizes (EARS-10)" - } - assert { - condition = output.dind_resources.limits.cpu == "2" && output.dind_resources.limits.memory == "4Gi" && output.dind_resources.limits["ephemeral-storage"] == "20Gi" - error_message = "dind limits must be {cpu=2, memory=4Gi, ephemeral-storage=20Gi} across all sizes (EARS-10)" - } -} - -run "size_invalid_fails_precondition" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000013" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"size\":\"xl\"}" - } - } - - # Violates the size-allowlist precondition ONLY — other required fields - # remain non-blank and the JSON is valid. Maintains the one-violation-per- - # fixture invariant documented above the Phase 2 precondition block. - expect_failures = [resource.coder_agent.dev] -} - -# COVERAGE NOTE: the three docker-gating runs below all exercise -# var.deployment_type == "pod" (the root template's default). The -# workspace-pod module also gates dind/DOCKER_HOST inside a mirrored -# kubernetes_deployment_v1 branch that is NOT exercised by these tests. -# If the template ever enables `deployment_type = "deployment"`, add a -# module-level tftest under terraform/modules/workspace-pod/tests/ that -# asserts the same invariants with deployment_type="deployment". 
- -# ─── Docker sidecar gating ────────────────────────────────────────────────── - -run "docker_false_by_default" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000015" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" - } - } - - # EARS-6 default-docker behavior: absent => false => dind container omitted - assert { - condition = output.docker_enabled == false - error_message = "docker must default to false when absent, and the workspace-pod module must receive docker_enabled=false (EARS-11)" - } - assert { - condition = length([for c in module.workspace.pod_containers : c if c.name == "dind"]) == 0 - error_message = "dind container must not be rendered when docker=false (EARS-11)" - } -} - -run "docker_true_enables_sidecar" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000016" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true}" - } - } - - assert { - condition = output.docker_enabled == true - error_message = "docker=true must propagate to workspace-pod.docker_enabled (EARS-12)" - } - assert { - condition = length([for c in module.workspace.pod_containers : c if c.name == "dind"]) == 1 - error_message = "dind container must be rendered exactly once when docker=true (EARS-12)" - } - assert { - condition = length([for c in module.workspace.pod_containers : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 1 - error_message = "DOCKER_HOST env must be present on dev container 
when docker=true (EARS-12)" - } -} - -run "docker_false_sets_no_docker_host" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000018" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":false}" - } - } - - # EARS-11: when docker=false, DOCKER_HOST must NOT appear on the dev container - assert { - condition = length([for c in module.workspace.pod_containers : c if c.name == "dev" && length([for e in c.env : e if e.name == "DOCKER_HOST"]) > 0]) == 0 - error_message = "DOCKER_HOST env must not be set on dev container when docker=false (EARS-11)" - } -} - -# ─── Volume mapping ────────────────────────────────────────────────────────── - -run "extra_volumes_mapped_to_module_shape" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000019" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"extra_volumes\":[{\"path\":\"/home/runner/cache\",\"size\":\"5Gi\"}]}" - } - } - - assert { - condition = length(output.mapped_extra_volumes) == 1 - error_message = "one extra_volume entry must produce one module volume (EARS-13)" - } - assert { - condition = output.mapped_extra_volumes[0].mount_path == "/home/runner/cache" - error_message = "mount_path must equal input path (EARS-13)" - } - assert { - condition = output.mapped_extra_volumes[0].persistent == true - error_message = "extra volumes must be persistent by default (per user clarification)" - } - assert { - condition = output.mapped_extra_volumes[0].containers == "dev" - error_message = 
"extra volumes must mount on dev container only" - } - assert { - condition = output.mapped_extra_volumes[0].size == "5Gi" - error_message = "extra volume size must pass through verbatim" - } - assert { - condition = output.mapped_extra_volumes[0].name == "home-runner-cache" - error_message = "PVC name must be input path with leading slash trimmed and remaining slashes replaced with dashes" - } - assert { - condition = output.mapped_extra_volumes[0].count == 1 - error_message = "mapped extra volume must have count=1" - } -} - -run "multiple_extra_volumes_mapped" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000023" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"extra_volumes\":[{\"path\":\"/a\",\"size\":\"1Gi\"},{\"path\":\"/b/c\",\"size\":\"2Gi\"}]}" - } - } - - # Two entries should produce two independent module volumes. 
- assert { - condition = length(output.mapped_extra_volumes) == 2 - error_message = "two extra_volumes entries must produce two module volumes (EARS-13)" - } - assert { - condition = length([for v in output.mapped_extra_volumes : v if v.name == "a" && v.mount_path == "/a" && v.size == "1Gi"]) == 1 - error_message = "single-segment path /a must map to name=a, mount_path=/a, size=1Gi" - } - assert { - condition = length([for v in output.mapped_extra_volumes : v if v.name == "b-c" && v.mount_path == "/b/c" && v.size == "2Gi"]) == 1 - error_message = "multi-segment path /b/c must map to name=b-c, mount_path=/b/c, size=2Gi" - } -} - -run "extra_volumes_default_empty" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000020" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\"}" - } - } - - # When extra_volumes is omitted, mapped_extra_volumes must be empty and - # all_volumes must be empty (docker is absent => false => no docker-cache). 
- assert { - condition = length(output.mapped_extra_volumes) == 0 - error_message = "mapped_extra_volumes must be empty when extra_volumes is absent" - } - assert { - condition = length(output.all_volumes) == 0 - error_message = "all_volumes must be empty when docker=false and no extra_volumes" - } -} - -run "docker_cache_volume_present_when_docker_true" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000021" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true}" - } - } - - # Exactly one volume named "docker-cache" must be present, mounted on dind. - assert { - condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 1 - error_message = "all_volumes must include exactly one docker-cache volume when docker=true (EARS-12)" - } - assert { - condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].containers == "dind" - error_message = "docker-cache volume must mount on dind container (EARS-12)" - } - assert { - condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].persistent == false - error_message = "docker-cache volume must be ephemeral (not persistent)" - } - assert { - condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].mount_path == "/var/lib/docker" - error_message = "docker-cache mount_path must be /var/lib/docker" - } - assert { - condition = [for v in output.all_volumes : v if v.name == "docker-cache"][0].size == "10Gi" - error_message = "docker-cache size must be 10Gi (EARS-12)" - } -} - -run "docker_cache_volume_absent_when_docker_false" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = 
"00000000-0000-0000-0000-000000000022" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":false,\"extra_volumes\":[{\"path\":\"/cache\",\"size\":\"2Gi\"}]}" - } - } - - # docker=false: no docker-cache volume. But extra_volumes still maps. - assert { - condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 0 - error_message = "docker-cache volume must NOT be in all_volumes when docker=false (EARS-11)" - } - assert { - condition = length([for v in output.all_volumes : v if v.name == "cache"]) == 1 - error_message = "extra_volumes must still be mapped into all_volumes when docker=false" - } -} - -run "docker_true_plus_extra_volumes" { - command = plan - - override_data { - target = data.coder_workspace.me - values = { - start_count = 1 - name = "t" - id = "00000000-0000-0000-0000-000000000024" - access_url = "https://example.test" - } - } - override_data { - target = data.coder_task.me - values = { - prompt = "{\"repo_url\":\"https://github.com/acme/widget\",\"repo_name\":\"widget\",\"ai_prompt\":\"x\",\"docker\":true,\"extra_volumes\":[{\"path\":\"/data\",\"size\":\"4Gi\"}]}" - } - } - - # docker=true should produce docker-cache + one mapped extra volume = 2 total. 
- assert { - condition = length(output.all_volumes) == 2 - error_message = "docker=true + 1 extra_volume must yield 2 total volumes (EARS-12 + EARS-13)" - } - assert { - condition = length([for v in output.all_volumes : v if v.name == "docker-cache"]) == 1 - error_message = "docker-cache must be present when docker=true, even with extra_volumes" - } - assert { - condition = length([for v in output.all_volumes : v if v.name == "data"]) == 1 - error_message = "extra_volume must still be mapped when docker=true" - } -} From 5ac5e3389087f30a0bd17b3d1f985b4f78219aae Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 23:40:06 -0700 Subject: [PATCH 24/25] Bump package-lock.json --- package-lock.json | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/package-lock.json b/package-lock.json index 90f4749..e80be5c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -333,8 +333,7 @@ "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20260418.1.tgz", "integrity": "sha512-bywXb2XmeSqrLCQYipcupLneqx015YhhNWz2v9b9iatpe8Cg551vP7ZuD5S2a6GfBka0dDnO70kIBiBvFglcrg==", "dev": true, - "license": "MIT OR Apache-2.0", - "peer": true + "license": "MIT OR Apache-2.0" }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", @@ -349,6 +348,30 @@ "node": ">=12" } }, + "node_modules/@emnapi/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz", + "integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": 
"sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@emnapi/wasi-threads": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", @@ -1407,7 +1430,6 @@ "node_modules/@octokit/core": { "version": "7.0.6", "license": "MIT", - "peer": true, "dependencies": { "@octokit/auth-token": "^6.0.0", "@octokit/graphql": "^9.0.3", @@ -1967,7 +1989,6 @@ "integrity": "sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "undici-types": "~7.19.0" } @@ -2036,7 +2057,6 @@ "integrity": "sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/utils": "4.1.4", "pathe": "^2.0.3" @@ -2051,7 +2071,6 @@ "integrity": "sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/pretty-format": "4.1.4", "@vitest/utils": "4.1.4", @@ -2655,7 +2674,6 @@ "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -2926,7 +2944,6 @@ "integrity": "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "pathe": "^2.0.3" } @@ -2945,7 +2962,6 @@ "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", @@ -3024,7 +3040,6 @@ "integrity": 
"sha512-tFuJqTxKb8AvfyqMfnavXdzfy3h3sWZRWwfluGbkeR7n0HUev+FmNgZ8SDrRBTVrVCjgH5cA21qGbCffMNtWvg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/expect": "4.1.4", "@vitest/mocker": "4.1.4", @@ -3133,7 +3148,6 @@ "dev": true, "hasInstallScript": true, "license": "Apache-2.0", - "peer": true, "bin": { "workerd": "bin/workerd" }, From 045a242517ba89cbeb505701706202b96b9b2612 Mon Sep 17 00:00:00 2001 From: Nicholas Molnar <65710+neekolas@users.noreply.github.com> Date: Mon, 20 Apr 2026 23:43:00 -0700 Subject: [PATCH 25/25] Fix formatting --- src/config/repo-config-schema.test.ts | 24 +++++++++++++--------- src/workflows/task-runner-workflow.test.ts | 24 +++++++++++----------- 2 files changed, 26 insertions(+), 22 deletions(-) diff --git a/src/config/repo-config-schema.test.ts b/src/config/repo-config-schema.test.ts index 932b200..c5c1998 100644 --- a/src/config/repo-config-schema.test.ts +++ b/src/config/repo-config-schema.test.ts @@ -150,14 +150,18 @@ describe("volume size normalization → canonical Kubernetes binary-SI form", () expect(r.sandbox.volumes[0]).toEqual({ path: "/data", size: "20Gi" }); }); - test.each(["10", "gb", "10bb", "10.5gb", "10eb", "abc"])( - "invalid volume size %s → parse rejects", - (input) => { - expect(() => - parseRepoConfigToml( - `[[sandbox.volumes]]\npath = "/data"\nsize = "${input}"`, - ), - ).toThrow(/Invalid RepoConfig/); - }, - ); + test.each([ + "10", + "gb", + "10bb", + "10.5gb", + "10eb", + "abc", + ])("invalid volume size %s → parse rejects", (input) => { + expect(() => + parseRepoConfigToml( + `[[sandbox.volumes]]\npath = "/data"\nsize = "${input}"`, + ), + ).toThrow(/Invalid RepoConfig/); + }); }); diff --git a/src/workflows/task-runner-workflow.test.ts b/src/workflows/task-runner-workflow.test.ts index 1acfe57..3e56a10 100644 --- a/src/workflows/task-runner-workflow.test.ts +++ b/src/workflows/task-runner-workflow.test.ts @@ -121,10 +121,10 @@ describe("TaskRunnerWorkflow dispatch — task_requested", 
() => { source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, issue: { - id: 1001, - number: 1, - url: "https://github.com/acme/repo/issues/1", - }, + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); @@ -192,10 +192,10 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { }, repository: { owner: "acme", name: "repo" }, issue: { - id: 1001, - number: 1, - url: "https://github.com/acme/repo/issues/1", - }, + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params }); @@ -260,10 +260,10 @@ describe("TaskRunnerWorkflow dispatch — task_requested", () => { source: { type: "github", installationId: 1 }, repository: { owner: "acme", name: "repo" }, issue: { - id: 1001, - number: 1, - url: "https://github.com/acme/repo/issues/1", - }, + id: 1001, + number: 1, + url: "https://github.com/acme/repo/issues/1", + }, requester: { login: "alice", externalId: 42 }, }; await env.TASK_RUNNER_WORKFLOW.create({ id: instanceId, params });