Compare commits


1 commit

Author: Jon Ayers
SHA1: f5b4777453
Message: feat: add ghostty-web terminal support
Date: 2025-11-20 00:00:20 +00:00
739 changed files with 14604 additions and 36634 deletions
-2
@@ -27,7 +27,5 @@ ignorePatterns:
- pattern: "splunk.com"
- pattern: "stackoverflow.com/questions"
- pattern: "developer.hashicorp.com/terraform/language"
- pattern: "platform.openai.com"
- pattern: "api.openai.com"
aliveStatusCodes:
- 200
+1 -1
@@ -4,7 +4,7 @@ description: |
inputs:
version:
description: "The Go version to use."
default: "1.24.10"
default: "1.24.6"
use-preinstalled-go:
description: "Whether to use preinstalled Go."
default: "false"
+3 -10
@@ -5,13 +5,6 @@ runs:
using: "composite"
steps:
- name: Setup sqlc
# uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
# with:
# sqlc-version: "1.30.0"
# Switched to coder/sqlc fork to fix ambiguous column bug, see:
# - https://github.com/coder/sqlc/pull/1
# - https://github.com/sqlc-dev/sqlc/pull/4159
shell: bash
run: |
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
with:
sqlc-version: "1.27.0"
+1 -1
@@ -7,5 +7,5 @@ runs:
- name: Install Terraform
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: 1.13.4
terraform_version: 1.13.0
terraform_wrapper: false
@@ -0,0 +1,34 @@
app = "sao-paulo-coder"
primary_region = "gru"
[experimental]
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
auto_rollback = true
[build]
image = "ghcr.io/coder/coder-preview:main"
[env]
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
CODER_VERBOSE = "true"
[http_service]
internal_port = 3000
force_https = true
auto_stop_machines = true
auto_start_machines = true
min_machines_running = 0
# Ref: https://fly.io/docs/reference/configuration/#http_service-concurrency
[http_service.concurrency]
type = "requests"
soft_limit = 50
hard_limit = 100
[[vm]]
cpu_kind = "shared"
cpus = 2
memory_mb = 512
+36 -32
@@ -35,7 +35,7 @@ jobs:
tailnet-integration: ${{ steps.filter.outputs.tailnet-integration }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -157,7 +157,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -191,7 +191,7 @@ jobs:
# Check for any typos
- name: Check for typos
uses: crate-ci/typos@626c4bedb751ce0b7f03262ca97ddda9a076ae1c # v1.39.2
uses: crate-ci/typos@80c8a4945eec0f6d464eaf9e65ed98ef085283d1 # v1.38.1
with:
config: .github/workflows/typos.toml
@@ -230,12 +230,12 @@ jobs:
shell: bash
gen:
timeout-minutes: 20
timeout-minutes: 8
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
if: ${{ !cancelled() }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -271,7 +271,6 @@ jobs:
popd
- name: make gen
timeout-minutes: 8
run: |
# Remove golden files to detect discrepancy in generated files.
make clean/golden-files
@@ -289,10 +288,10 @@ jobs:
needs: changes
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
timeout-minutes: 20
timeout-minutes: 7
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -316,7 +315,6 @@ jobs:
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
- name: make fmt
timeout-minutes: 7
run: |
PATH="${PATH}:$(go env GOPATH)/bin" \
make --output-sync -j -B fmt
@@ -343,7 +341,7 @@ jobs:
- windows-2022
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -378,6 +376,13 @@ jobs:
id: go-paths
uses: ./.github/actions/setup-go-paths
- name: Download Go Build Cache
id: download-go-build-cache
uses: ./.github/actions/test-cache/download
with:
key-prefix: test-go-build-${{ runner.os }}-${{ runner.arch }}
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
- name: Setup Go
uses: ./.github/actions/setup-go
with:
@@ -385,7 +390,8 @@ jobs:
# download the toolchain configured in go.mod, so we don't
# need to reinstall it. It's faster on Windows runners.
use-preinstalled-go: ${{ runner.os == 'Windows' }}
use-cache: true
# Cache is already downloaded above
use-cache: false
- name: Setup Terraform
uses: ./.github/actions/setup-tf
@@ -499,6 +505,12 @@ jobs:
name: failed-test-db-dump-${{matrix.os}}
path: "**/*.test.sql"
- name: Upload Go Build Cache
uses: ./.github/actions/test-cache/upload
with:
cache-key: ${{ steps.download-go-build-cache.outputs.cache-key }}
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
- name: Upload Test Cache
uses: ./.github/actions/test-cache/upload
with:
@@ -532,7 +544,7 @@ jobs:
timeout-minutes: 25
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -581,7 +593,7 @@ jobs:
timeout-minutes: 25
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -641,7 +653,7 @@ jobs:
timeout-minutes: 20
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -668,7 +680,7 @@ jobs:
timeout-minutes: 20
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -701,7 +713,7 @@ jobs:
name: ${{ matrix.variant.name }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -756,14 +768,6 @@ jobs:
path: ./site/test-results/**/*.webm
retention-days: 7
- name: Upload debug log
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: coderd-debug-logs${{ matrix.variant.premium && '-premium' || '' }}
path: ./site/e2e/test-results/debug.log
retention-days: 7
- name: Upload pprof dumps
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
@@ -781,7 +785,7 @@ jobs:
if: needs.changes.outputs.site == 'true' || needs.changes.outputs.ci == 'true'
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -802,7 +806,7 @@ jobs:
# the check to pass. This is desired in PRs, but not in mainline.
- name: Publish to Chromatic (non-mainline)
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
uses: chromaui/action@ac86f2ff0a458ffbce7b40698abd44c0fa34d4b6 # v13.3.3
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
env:
NODE_OPTIONS: "--max_old_space_size=4096"
STORYBOOK: true
@@ -834,7 +838,7 @@ jobs:
# infinitely "in progress" in mainline unless we re-review each build.
- name: Publish to Chromatic (mainline)
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
uses: chromaui/action@ac86f2ff0a458ffbce7b40698abd44c0fa34d4b6 # v13.3.3
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
env:
NODE_OPTIONS: "--max_old_space_size=4096"
STORYBOOK: true
@@ -862,7 +866,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -933,7 +937,7 @@ jobs:
if: always()
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -1053,7 +1057,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -1108,7 +1112,7 @@ jobs:
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -1505,7 +1509,7 @@ jobs:
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+6 -4
@@ -36,7 +36,7 @@ jobs:
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -65,7 +65,7 @@ jobs:
packages: write # to retag image as dogfood
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -92,7 +92,7 @@ jobs:
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
- name: Set up Flux CLI
uses: fluxcd/flux2/action@b6e76ca2534f76dcb8dd94fb057cdfa923c3b641 # v2.7.3
uses: fluxcd/flux2/action@4a15fa6a023259353ef750acf1c98fe88407d4d0 # v2.7.2
with:
# Keep this and the github action up to date with the version of flux installed in dogfood cluster
version: "2.7.0"
@@ -146,7 +146,7 @@ jobs:
needs: deploy
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -163,10 +163,12 @@ jobs:
run: |
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
flyctl deploy --image "$IMAGE" --app jnb-coder --config ./.github/fly-wsproxies/jnb-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_JNB" --yes
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
IMAGE: ${{ inputs.image }}
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
TOKEN_JNB: ${{ secrets.FLY_JNB_CODER_PROXY_SESSION_TOKEN }}
-205
@@ -1,205 +0,0 @@
# This workflow checks if a PR requires documentation updates.
# It creates a Coder Task that uses AI to analyze the PR changes,
# search existing docs, and comment with recommendations.
#
# Triggered by: Adding the "doc-check" label to a PR, or manual dispatch.
name: AI Documentation Check
on:
pull_request:
types:
- labeled
workflow_dispatch:
inputs:
pr_url:
description: "Pull Request URL to check"
required: true
type: string
template_preset:
description: "Template preset to use"
required: false
default: ""
type: string
jobs:
doc-check:
name: Analyze PR for Documentation Updates Needed
runs-on: ubuntu-latest
if: |
(github.event.label.name == 'doc-check' || github.event_name == 'workflow_dispatch') &&
(github.event.pull_request.draft == false || github.event_name == 'workflow_dispatch')
timeout-minutes: 30
env:
CODER_URL: ${{ secrets.DOC_CHECK_CODER_URL }}
CODER_SESSION_TOKEN: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
permissions:
contents: read
pull-requests: write
actions: write
steps:
- name: Determine PR Context
id: determine-context
env:
GITHUB_ACTOR: ${{ github.actor }}
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_EVENT_PR_HTML_URL: ${{ github.event.pull_request.html_url }}
GITHUB_EVENT_PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_EVENT_SENDER_ID: ${{ github.event.sender.id }}
GITHUB_EVENT_SENDER_LOGIN: ${{ github.event.sender.login }}
INPUTS_PR_URL: ${{ inputs.pr_url }}
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || '' }}
GH_TOKEN: ${{ github.token }}
run: |
echo "Using template preset: ${INPUTS_TEMPLATE_PRESET}"
echo "template_preset=${INPUTS_TEMPLATE_PRESET}" >> "${GITHUB_OUTPUT}"
# For workflow_dispatch, use the provided PR URL
if [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then
if ! GITHUB_USER_ID=$(gh api "users/${GITHUB_ACTOR}" --jq '.id'); then
echo "::error::Failed to get GitHub user ID for actor ${GITHUB_ACTOR}"
exit 1
fi
echo "Using workflow_dispatch actor: ${GITHUB_ACTOR} (ID: ${GITHUB_USER_ID})"
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
echo "github_username=${GITHUB_ACTOR}" >> "${GITHUB_OUTPUT}"
echo "Using PR URL: ${INPUTS_PR_URL}"
# Convert /pull/ to /issues/ for create-task-action compatibility
ISSUE_URL="${INPUTS_PR_URL/\/pull\//\/issues\/}"
echo "pr_url=${ISSUE_URL}" >> "${GITHUB_OUTPUT}"
# Extract PR number from URL for later use
PR_NUMBER=$(echo "${INPUTS_PR_URL}" | grep -oP '(?<=pull/)\d+')
echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
elif [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
GITHUB_USER_ID=${GITHUB_EVENT_SENDER_ID}
echo "Using label adder: ${GITHUB_EVENT_SENDER_LOGIN} (ID: ${GITHUB_USER_ID})"
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
echo "github_username=${GITHUB_EVENT_SENDER_LOGIN}" >> "${GITHUB_OUTPUT}"
echo "Using PR URL: ${GITHUB_EVENT_PR_HTML_URL}"
# Convert /pull/ to /issues/ for create-task-action compatibility
ISSUE_URL="${GITHUB_EVENT_PR_HTML_URL/\/pull\//\/issues\/}"
echo "pr_url=${ISSUE_URL}" >> "${GITHUB_OUTPUT}"
echo "pr_number=${GITHUB_EVENT_PR_NUMBER}" >> "${GITHUB_OUTPUT}"
else
echo "::error::Unsupported event type: ${GITHUB_EVENT_NAME}"
exit 1
fi
- name: Extract changed files and build prompt
id: extract-context
env:
PR_URL: ${{ steps.determine-context.outputs.pr_url }}
PR_NUMBER: ${{ steps.determine-context.outputs.pr_number }}
GH_TOKEN: ${{ github.token }}
run: |
echo "Analyzing PR #${PR_NUMBER}"
# Build task prompt - using unquoted heredoc so variables expand
TASK_PROMPT=$(cat <<EOF
Review PR #${PR_NUMBER} and determine if documentation needs updating or creating.
PR URL: ${PR_URL}
WORKFLOW:
1. Setup (repo is pre-cloned at ~/coder)
cd ~/coder
git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER}
git checkout pr-${PR_NUMBER}
2. Get PR info
Use GitHub MCP tools to get PR title, body, and diff
Or use: git diff main...pr-${PR_NUMBER}
3. Understand Changes
Read the diff and identify what changed
Ask: Is this user-facing? Does it change behavior? Is it a new feature?
4. Search for Related Docs
cat ~/coder/docs/manifest.json | jq '.routes[] | {title, path}' | head -50
grep -ri "relevant_term" ~/coder/docs/ --include="*.md"
5. Decide
NEEDS DOCS if: New feature, API change, CLI change, behavior change, user-visible
NO DOCS if: Internal refactor, test-only, already documented, non-user-facing, dependency updates
FIRST check: Did this PR already update docs? If yes and complete, say "No Changes Needed"
6. Comment on the PR using this format
COMMENT FORMAT:
## 📚 Documentation Check
### ✅ Updates Needed
- **[docs/path/file.md](github_link)** - Brief what needs changing
### 📝 New Docs Needed
- **docs/suggested/location.md** - What should be documented
### ✨ No Changes Needed
[Reason: Documents already updated in PR | Internal changes only | Test-only | No user-facing impact]
---
*This comment was generated by an AI Agent through [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
DOCS STRUCTURE:
Read ~/coder/docs/manifest.json for the complete documentation structure.
Common areas include: reference/, admin/, user-guides/, ai-coder/, install/, tutorials/
But check manifest.json - it has everything.
EOF
)
# Output the prompt
{
echo "task_prompt<<EOFOUTPUT"
echo "${TASK_PROMPT}"
echo "EOFOUTPUT"
} >> "${GITHUB_OUTPUT}"
- name: Checkout create-task-action
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 1
path: ./.github/actions/create-task-action
persist-credentials: false
ref: main
repository: coder/create-task-action
- name: Create Coder Task for Documentation Check
id: create_task
uses: ./.github/actions/create-task-action
with:
coder-url: ${{ secrets.DOC_CHECK_CODER_URL }}
coder-token: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
coder-organization: "default"
coder-template-name: coder
coder-template-preset: ${{ steps.determine-context.outputs.template_preset }}
coder-task-name-prefix: doc-check
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
github-user-id: ${{ steps.determine-context.outputs.github_user_id }}
github-token: ${{ github.token }}
github-issue-url: ${{ steps.determine-context.outputs.pr_url }}
comment-on-issue: true
- name: Write outputs
env:
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
TASK_URL: ${{ steps.create_task.outputs.task-url }}
PR_URL: ${{ steps.determine-context.outputs.pr_url }}
run: |
{
echo "## Documentation Check Task"
echo ""
echo "**PR:** ${PR_URL}"
echo "**Task created:** ${TASK_CREATED}"
echo "**Task name:** ${TASK_NAME}"
echo "**Task URL:** ${TASK_URL}"
echo ""
echo "The Coder task is analyzing the PR changes and will comment with documentation recommendations."
} >> "${GITHUB_STEP_SUMMARY}"
+1 -1
@@ -38,7 +38,7 @@ jobs:
if: github.repository_owner == 'coder'
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+1 -1
@@ -30,7 +30,7 @@ jobs:
- name: Setup Node
uses: ./.github/actions/setup-node
- uses: tj-actions/changed-files@70069877f29101175ed2b055d210fe8b1d54d7d7 # v45.0.7
- uses: tj-actions/changed-files@dbf178ceecb9304128c8e0648591d71208c6e2c9 # v45.0.7
id: changed-files
with:
files: |
+3 -3
@@ -26,7 +26,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -40,7 +40,7 @@ jobs:
with:
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
# on version 2.29 and above.
nix_version: "2.28.5"
nix_version: "2.28.4"
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
@@ -125,7 +125,7 @@ jobs:
id-token: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+1 -1
@@ -27,7 +27,7 @@ jobs:
- windows-2022
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+1 -1
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+1 -1
@@ -19,7 +19,7 @@ jobs:
packages: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+5 -5
@@ -39,7 +39,7 @@ jobs:
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -76,7 +76,7 @@ jobs:
runs-on: "ubuntu-latest"
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -184,7 +184,7 @@ jobs:
pull-requests: write # needed for commenting on PRs
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -228,7 +228,7 @@ jobs:
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -288,7 +288,7 @@ jobs:
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+1 -1
@@ -14,7 +14,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+5 -5
@@ -164,7 +164,7 @@ jobs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -785,7 +785,7 @@ jobs:
- name: Send repository-dispatch event
if: ${{ !inputs.dry_run }}
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.CDRCI_GITHUB_TOKEN }}
repository: coder/packages
@@ -802,7 +802,7 @@ jobs:
# TODO: skip this if it's not a new release (i.e. a backport). This is
# fine right now because it just makes a PR that we can close.
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -878,7 +878,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -971,7 +971,7 @@ jobs:
if: ${{ !inputs.dry_run }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
+2 -2
@@ -20,7 +20,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -47,6 +47,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
with:
sarif_file: results.sarif
+5 -5
@@ -27,7 +27,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -40,7 +40,7 @@ jobs:
uses: ./.github/actions/setup-go
- name: Initialize CodeQL
uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
with:
languages: go, javascript
@@ -50,7 +50,7 @@ jobs:
rm Makefile
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
- name: Send Slack notification on failure
if: ${{ failure() }}
@@ -69,7 +69,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -154,7 +154,7 @@ jobs:
severity: "CRITICAL,HIGH"
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
with:
sarif_file: trivy-results.sarif
category: "Trivy"
+5 -5
@@ -18,7 +18,7 @@ jobs:
pull-requests: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -96,7 +96,7 @@ jobs:
contents: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
@@ -120,12 +120,12 @@ jobs:
actions: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
- name: Delete PR Cleanup workflow runs
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
with:
token: ${{ github.token }}
repository: ${{ github.repository }}
@@ -134,7 +134,7 @@ jobs:
delete_workflow_pattern: pr-cleanup.yaml
- name: Delete PR Deploy workflow skipped runs
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
with:
token: ${{ github.token }}
repository: ${{ github.repository }}
+78 -51
@@ -17,8 +17,8 @@ on:
type: string
template_preset:
description: "Template preset to use"
required: false
default: ""
required: true
default: "none"
type: string
prefix:
description: "Prefix for workspace name"
@@ -67,7 +67,7 @@ jobs:
GITHUB_EVENT_USER_LOGIN: ${{ github.event.sender.login }}
INPUTS_ISSUE_URL: ${{ inputs.issue_url }}
INPUTS_TEMPLATE_NAME: ${{ inputs.template_name || 'coder' }}
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || ''}}
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || 'none'}}
INPUTS_PREFIX: ${{ inputs.prefix || 'traiage' }}
GH_TOKEN: ${{ github.token }}
run: |
@@ -124,7 +124,7 @@ jobs:
exit 1
fi
- name: Extract context key and description from issue
- name: Extract context key from issue
id: extract-context
env:
ISSUE_URL: ${{ steps.determine-inputs.outputs.issue_url }}
@@ -132,59 +132,86 @@ jobs:
run: |
issue_number="$(gh issue view "${ISSUE_URL}" --json number --jq '.number')"
context_key="gh-${issue_number}"
TASK_PROMPT=$(cat <<EOF
Fix ${ISSUE_URL}
1. Use the gh CLI to read the issue description and comments.
2. Think carefully and try to understand the root cause. If the issue is unclear or not well defined, ask me to clarify and provide more information.
3. Write a proposed implementation plan to PLAN.md for me to review before starting implementation. Your plan should use TDD and only make the minimal changes necessary to fix the root cause.
4. When I approve your plan, start working on it. If you encounter issues with the plan, ask me for clarification and update the plan as required.
5. When you have finished implementation according to the plan, commit and push your changes, and create a PR using the gh CLI for me to review.
EOF
)
echo "context_key=${context_key}" >> "${GITHUB_OUTPUT}"
{
echo "TASK_PROMPT<<EOF"
echo "${TASK_PROMPT}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
echo "CONTEXT_KEY=${context_key}" >> "${GITHUB_ENV}"
- name: Download and install Coder binary
shell: bash
env:
CODER_URL: ${{ secrets.TRAIAGE_CODER_URL }}
run: |
if [ "${{ runner.arch }}" == "ARM64" ]; then
ARCH="arm64"
else
ARCH="amd64"
fi
mkdir -p "${HOME}/.local/bin"
curl -fsSL --compressed "$CODER_URL/bin/coder-linux-${ARCH}" -o "${HOME}/.local/bin/coder"
chmod +x "${HOME}/.local/bin/coder"
export PATH="$HOME/.local/bin:$PATH"
coder version
coder whoami
echo "$HOME/.local/bin" >> "${GITHUB_PATH}"
- name: Get Coder username from GitHub actor
id: get-coder-username
env:
CODER_SESSION_TOKEN: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
GH_TOKEN: ${{ github.token }}
GITHUB_USER_ID: ${{ steps.determine-inputs.outputs.github_user_id }}
run: |
user_json=$(
coder users list --github-user-id="${GITHUB_USER_ID}" --output=json
)
coder_username=$(jq -r 'first | .username' <<< "$user_json")
[[ -z "${coder_username}" || "${coder_username}" == "null" ]] && echo "No Coder user with GitHub user ID ${GITHUB_USER_ID} found" && exit 1
echo "coder_username=${coder_username}" >> "${GITHUB_OUTPUT}"
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 1
path: ./.github/actions/create-task-action
persist-credentials: false
ref: main
repository: coder/create-task-action
fetch-depth: 0
- name: Create Coder Task
id: create_task
uses: ./.github/actions/create-task-action
with:
coder-url: ${{ secrets.TRAIAGE_CODER_URL }}
coder-token: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
coder-organization: "default"
coder-template-name: coder
coder-template-preset: ${{ steps.determine-inputs.outputs.template_preset }}
coder-task-name-prefix: gh-coder
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
github-user-id: ${{ steps.determine-inputs.outputs.github_user_id }}
github-token: ${{ github.token }}
github-issue-url: ${{ steps.determine-inputs.outputs.issue_url }}
comment-on-issue: ${{ startsWith(steps.determine-inputs.outputs.issue_url, format('{0}/{1}', github.server_url, github.repository)) }}
- name: Write outputs
# TODO(Cian): this is a good use-case for 'recipes'
- name: Create Coder task
id: create-task
env:
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
TASK_URL: ${{ steps.create_task.outputs.task-url }}
CODER_USERNAME: ${{ steps.get-coder-username.outputs.coder_username }}
CONTEXT_KEY: ${{ steps.extract-context.outputs.context_key }}
GH_TOKEN: ${{ github.token }}
GITHUB_REPOSITORY: ${{ github.repository }}
ISSUE_URL: ${{ steps.determine-inputs.outputs.issue_url }}
PREFIX: ${{ steps.determine-inputs.outputs.prefix }}
RUN_ID: ${{ github.run_id }}
TEMPLATE_NAME: ${{ steps.determine-inputs.outputs.template_name }}
TEMPLATE_PARAMETERS: ${{ secrets.TRAIAGE_TEMPLATE_PARAMETERS }}
TEMPLATE_PRESET: ${{ steps.determine-inputs.outputs.template_preset }}
run: |
{
echo "**Task created:** ${TASK_CREATED}"
echo "**Task name:** ${TASK_NAME}"
echo "**Task URL**: ${TASK_URL}"
} >> "${GITHUB_STEP_SUMMARY}"
# Fetch issue description using `gh` CLI
#shellcheck disable=SC2016 # The template string should not be subject to shell expansion
issue_description=$(gh issue view "${ISSUE_URL}" \
--json 'title,body,comments' \
--template '{{printf "%s\n\n%s\n\nComments:\n" .title .body}}{{range $k, $v := .comments}} - {{index $v.author "login"}}: {{printf "%s\n" $v.body}}{{end}}')
# Write a prompt to PROMPT_FILE
PROMPT=$(cat <<EOF
Fix ${ISSUE_URL}
Analyze the below GitHub issue description, understand the root cause, and make appropriate changes to resolve the issue.
---
${issue_description}
EOF
)
export PROMPT
export TASK_NAME="${PREFIX}-${CONTEXT_KEY}-${RUN_ID}"
echo "Creating task: $TASK_NAME"
./scripts/traiage.sh create
if [[ "${ISSUE_URL}" == "https://github.com/${GITHUB_REPOSITORY}"* ]]; then
gh issue comment "${ISSUE_URL}" --body "Task created: https://dev.coder.com/tasks/${CODER_USERNAME}/${TASK_NAME}" --create-if-none --edit-last
else
echo "Skipping comment on other repo."
fi
echo "TASK_NAME=${CODER_USERNAME}/${TASK_NAME}" >> "${GITHUB_OUTPUT}"
echo "TASK_NAME=${CODER_USERNAME}/${TASK_NAME}" >> "${GITHUB_ENV}"
-1
@@ -9,7 +9,6 @@ IST = "IST"
MacOS = "macOS"
AKS = "AKS"
O_WRONLY = "O_WRONLY"
AIBridge = "AI Bridge"
[default.extend-words]
AKS = "AKS"
+1 -1
@@ -21,7 +21,7 @@ jobs:
pull-requests: write # required to post PR review comments by the action
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
-3
@@ -91,6 +91,3 @@ __debug_bin*
**/.claude/settings.local.json
/.env
# Ignore plans written by AI agents.
PLAN.md
-2
@@ -27,5 +27,3 @@ coderd/schedule/autostop.go @deansheather @DanielleMaywood
# well as guidance from revenue.
coderd/usage/ @deansheather @spikecurtis
enterprise/coderd/usage/ @deansheather @spikecurtis
.github/ @jdomeracki-coder
+8 -18
@@ -636,17 +636,16 @@ TAILNETTEST_MOCKS := \
tailnet/tailnettest/subscriptionmock.go
AIBRIDGED_MOCKS := \
enterprise/aibridged/aibridgedmock/clientmock.go \
enterprise/aibridged/aibridgedmock/poolmock.go
enterprise/x/aibridged/aibridgedmock/clientmock.go \
enterprise/x/aibridged/aibridgedmock/poolmock.go
GEN_FILES := \
tailnet/proto/tailnet.pb.go \
agent/proto/agent.pb.go \
agent/agentsocket/proto/agentsocket.pb.go \
provisionersdk/proto/provisioner.pb.go \
provisionerd/proto/provisionerd.pb.go \
vpn/vpn.pb.go \
enterprise/aibridged/proto/aibridged.pb.go \
enterprise/x/aibridged/proto/aibridged.pb.go \
$(DB_GEN_FILES) \
$(SITE_GEN_FILES) \
coderd/rbac/object_gen.go \
@@ -697,9 +696,8 @@ gen/mark-fresh:
agent/proto/agent.pb.go \
provisionersdk/proto/provisioner.pb.go \
provisionerd/proto/provisionerd.pb.go \
agent/agentsocket/proto/agentsocket.pb.go \
vpn/vpn.pb.go \
enterprise/aibridged/proto/aibridged.pb.go \
enterprise/x/aibridged/proto/aibridged.pb.go \
coderd/database/dump.sql \
$(DB_GEN_FILES) \
site/src/api/typesGenerated.ts \
@@ -770,8 +768,8 @@ codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agen
go generate ./codersdk/workspacesdk/agentconnmock/
touch "$@"
$(AIBRIDGED_MOCKS): enterprise/aibridged/client.go enterprise/aibridged/pool.go
go generate ./enterprise/aibridged/aibridgedmock/
$(AIBRIDGED_MOCKS): enterprise/x/aibridged/client.go enterprise/x/aibridged/pool.go
go generate ./enterprise/x/aibridged/aibridgedmock/
touch "$@"
agent/agentcontainers/dcspec/dcspec_gen.go: \
@@ -802,14 +800,6 @@ agent/proto/agent.pb.go: agent/proto/agent.proto
--go-drpc_opt=paths=source_relative \
./agent/proto/agent.proto
agent/agentsocket/proto/agentsocket.pb.go: agent/agentsocket/proto/agentsocket.proto
protoc \
--go_out=. \
--go_opt=paths=source_relative \
--go-drpc_out=. \
--go-drpc_opt=paths=source_relative \
./agent/agentsocket/proto/agentsocket.proto
provisionersdk/proto/provisioner.pb.go: provisionersdk/proto/provisioner.proto
protoc \
--go_out=. \
@@ -832,13 +822,13 @@ vpn/vpn.pb.go: vpn/vpn.proto
--go_opt=paths=source_relative \
./vpn/vpn.proto
enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged.proto
enterprise/x/aibridged/proto/aibridged.pb.go: enterprise/x/aibridged/proto/aibridged.proto
protoc \
--go_out=. \
--go_opt=paths=source_relative \
--go-drpc_out=. \
--go-drpc_opt=paths=source_relative \
./enterprise/aibridged/proto/aibridged.proto
./enterprise/x/aibridged/proto/aibridged.proto
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
# -C sets the directory for the go run command
+49 -96
@@ -8,7 +8,6 @@ import (
"fmt"
"hash/fnv"
"io"
"maps"
"net"
"net/http"
"net/netip"
@@ -41,7 +40,6 @@ import (
"github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentexec"
"github.com/coder/coder/v2/agent/agentscripts"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/agent/proto/resourcesmonitor"
@@ -72,21 +70,16 @@ const (
)
type Options struct {
Filesystem afero.Fs
LogDir string
TempDir string
ScriptDataDir string
Client Client
ReconnectingPTYTimeout time.Duration
EnvironmentVariables map[string]string
Logger slog.Logger
// IgnorePorts tells the api handler which ports to ignore when
// listing all listening ports. This is helpful to hide ports that
// are used by the agent, that the user does not care about.
IgnorePorts map[int]string
// ListeningPortsGetter is used to get the list of listening ports. Only
// tests should set this. If unset, a default that queries the OS will be used.
ListeningPortsGetter ListeningPortsGetter
Filesystem afero.Fs
LogDir string
TempDir string
ScriptDataDir string
Client Client
ReconnectingPTYTimeout time.Duration
EnvironmentVariables map[string]string
Logger slog.Logger
IgnorePorts map[int]string
PortCacheDuration time.Duration
SSHMaxTimeout time.Duration
TailnetListenPort uint16
Subsystems []codersdk.AgentSubsystem
@@ -98,8 +91,6 @@ type Options struct {
Devcontainers bool
DevcontainerAPIOptions []agentcontainers.Option // Enable Devcontainers for these to be effective.
Clock quartz.Clock
SocketServerEnabled bool
SocketPath string // Path for the agent socket server socket
}
type Client interface {
@@ -146,7 +137,9 @@ func New(options Options) Agent {
if options.ServiceBannerRefreshInterval == 0 {
options.ServiceBannerRefreshInterval = 2 * time.Minute
}
if options.PortCacheDuration == 0 {
options.PortCacheDuration = 1 * time.Second
}
if options.Clock == nil {
options.Clock = quartz.NewReal()
}
@@ -160,38 +153,30 @@ func New(options Options) Agent {
options.Execer = agentexec.DefaultExecer
}
if options.ListeningPortsGetter == nil {
options.ListeningPortsGetter = &osListeningPortsGetter{
cacheDuration: 1 * time.Second,
}
}
hardCtx, hardCancel := context.WithCancel(context.Background())
gracefulCtx, gracefulCancel := context.WithCancel(hardCtx)
a := &agent{
clock: options.Clock,
tailnetListenPort: options.TailnetListenPort,
reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
logger: options.Logger,
gracefulCtx: gracefulCtx,
gracefulCancel: gracefulCancel,
hardCtx: hardCtx,
hardCancel: hardCancel,
coordDisconnected: make(chan struct{}),
environmentVariables: options.EnvironmentVariables,
client: options.Client,
filesystem: options.Filesystem,
logDir: options.LogDir,
tempDir: options.TempDir,
scriptDataDir: options.ScriptDataDir,
lifecycleUpdate: make(chan struct{}, 1),
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
reportConnectionsUpdate: make(chan struct{}, 1),
listeningPortsHandler: listeningPortsHandler{
getter: options.ListeningPortsGetter,
ignorePorts: maps.Clone(options.IgnorePorts),
},
clock: options.Clock,
tailnetListenPort: options.TailnetListenPort,
reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
logger: options.Logger,
gracefulCtx: gracefulCtx,
gracefulCancel: gracefulCancel,
hardCtx: hardCtx,
hardCancel: hardCancel,
coordDisconnected: make(chan struct{}),
environmentVariables: options.EnvironmentVariables,
client: options.Client,
filesystem: options.Filesystem,
logDir: options.LogDir,
tempDir: options.TempDir,
scriptDataDir: options.ScriptDataDir,
lifecycleUpdate: make(chan struct{}, 1),
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
reportConnectionsUpdate: make(chan struct{}, 1),
ignorePorts: options.IgnorePorts,
portCacheDuration: options.PortCacheDuration,
reportMetadataInterval: options.ReportMetadataInterval,
announcementBannersRefreshInterval: options.ServiceBannerRefreshInterval,
sshMaxTimeout: options.SSHMaxTimeout,
@@ -205,8 +190,6 @@ func New(options Options) Agent {
devcontainers: options.Devcontainers,
containerAPIOptions: options.DevcontainerAPIOptions,
socketPath: options.SocketPath,
socketServerEnabled: options.SocketServerEnabled,
}
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
// Each time we connect we replace the channel (while holding the closeMutex) with a new one
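As an illustrative aside (not part of this diff), the pattern the comment above describes — start with a closed channel meaning "not connected", swap in a fresh open channel under a mutex on each connect, and close it again on disconnect so every waiter is released — can be sketched in Go roughly as follows; the names connTracker and awaitDisconnect are hypothetical, not taken from the source:

package main

import (
	"fmt"
	"sync"
	"time"
)

// connTracker is a minimal, hypothetical version of the pattern: the channel
// starts closed (not connected), is replaced with an open channel on connect,
// and is closed again on disconnect so all current waiters wake at once.
type connTracker struct {
	mu           sync.Mutex
	disconnected chan struct{}
}

func newConnTracker() *connTracker {
	ch := make(chan struct{})
	close(ch) // initially closed: we are not connected yet
	return &connTracker{disconnected: ch}
}

func (t *connTracker) connect() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.disconnected = make(chan struct{}) // fresh open channel while connected
}

func (t *connTracker) disconnect() {
	t.mu.Lock()
	defer t.mu.Unlock()
	close(t.disconnected) // broadcasts to every current waiter
}

func (t *connTracker) awaitDisconnect() <-chan struct{} {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.disconnected
}

func main() {
	t := newConnTracker()
	t.connect()
	go func() { time.Sleep(50 * time.Millisecond); t.disconnect() }()
	<-t.awaitDisconnect()
	fmt.Println("disconnect observed")
}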
@@ -219,16 +202,20 @@ func New(options Options) Agent {
}
type agent struct {
clock quartz.Clock
logger slog.Logger
client Client
tailnetListenPort uint16
filesystem afero.Fs
logDir string
tempDir string
scriptDataDir string
listeningPortsHandler listeningPortsHandler
subsystems []codersdk.AgentSubsystem
clock quartz.Clock
logger slog.Logger
client Client
tailnetListenPort uint16
filesystem afero.Fs
logDir string
tempDir string
scriptDataDir string
// ignorePorts tells the api handler which ports to ignore when
// listing all listening ports. This is helpful to hide ports that
// are used by the agent, that the user does not care about.
ignorePorts map[int]string
portCacheDuration time.Duration
subsystems []codersdk.AgentSubsystem
reconnectingPTYTimeout time.Duration
reconnectingPTYServer *reconnectingpty.Server
@@ -284,10 +271,6 @@ type agent struct {
devcontainers bool
containerAPIOptions []agentcontainers.Option
containerAPI *agentcontainers.API
socketServerEnabled bool
socketPath string
socketServer *agentsocket.Server
}
func (a *agent) TailnetConn() *tailnet.Conn {
@@ -367,32 +350,9 @@ func (a *agent) init() {
s.ExperimentalContainers = a.devcontainers
},
)
a.initSocketServer()
go a.runLoop()
}
// initSocketServer initializes server that allows direct communication with a workspace agent using IPC.
func (a *agent) initSocketServer() {
if !a.socketServerEnabled {
a.logger.Info(a.hardCtx, "socket server is disabled")
return
}
server, err := agentsocket.NewServer(
a.logger.Named("socket"),
agentsocket.WithPath(a.socketPath),
)
if err != nil {
a.logger.Warn(a.hardCtx, "failed to create socket server", slog.Error(err), slog.F("path", a.socketPath))
return
}
a.socketServer = server
a.logger.Debug(a.hardCtx, "socket server started", slog.F("path", a.socketPath))
}
// runLoop attempts to start the agent in a retry loop.
// Coder may be offline temporarily, a connection issue
// may be happening, but regardless after the intermittent
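As a hedged aside (not part of the diff), the behavior the runLoop comment describes — keep restarting the agent's run function after intermittent failures, waiting between attempts, until it succeeds or the context is canceled — could be sketched roughly as below; runLoopSketch, runOnce, and the backoff values are illustrative only, not the agent's actual implementation:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runLoopSketch retries runOnce with a capped exponential backoff until it
// succeeds or ctx is canceled.
func runLoopSketch(ctx context.Context, runOnce func(context.Context) error) {
	backoff := time.Second
	for {
		err := runOnce(ctx)
		if err == nil || errors.Is(err, context.Canceled) {
			return
		}
		fmt.Println("run failed, retrying after", backoff, "error:", err)
		select {
		case <-ctx.Done():
			return
		case <-time.After(backoff):
		}
		if backoff < 30*time.Second {
			backoff *= 2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	attempts := 0
	runLoopSketch(ctx, func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("intermittent failure")
		}
		return nil
	})
	fmt.Println("connected after", attempts, "attempts")
}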
@@ -1127,7 +1087,7 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
if err != nil {
return xerrors.Errorf("fetch metadata: %w", err)
}
a.logger.Info(ctx, "fetched manifest")
a.logger.Info(ctx, "fetched manifest", slog.F("manifest", mp))
manifest, err := agentsdk.ManifestFromProto(mp)
if err != nil {
a.logger.Critical(ctx, "failed to convert manifest", slog.F("manifest", mp), slog.Error(err))
@@ -1960,7 +1920,6 @@ func (a *agent) Close() error {
lifecycleState = codersdk.WorkspaceAgentLifecycleShutdownError
}
}
a.setLifecycle(lifecycleState)
err = a.scriptRunner.Close()
@@ -1968,12 +1927,6 @@ func (a *agent) Close() error {
a.logger.Error(a.hardCtx, "script runner close", slog.Error(err))
}
if a.socketServer != nil {
if err := a.socketServer.Close(); err != nil {
a.logger.Error(a.hardCtx, "socket server close", slog.Error(err))
}
}
if err := a.containerAPI.Close(); err != nil {
a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
}
+2
View File
@@ -682,6 +682,8 @@ func (api *API) updaterLoop() {
} else {
prevErr = nil
}
default:
api.logger.Debug(api.ctx, "updater loop ticker skipped, update in progress")
}
return nil // Always nil to keep the ticker going.
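As an illustrative aside (not part of the diff), the added default branch above is the non-blocking arm of a Go select: when a previous update is still in flight, the tick is skipped and logged instead of queueing behind it. A minimal sketch of that skip-if-busy pattern, with hypothetical names (busy, doUpdate):

package main

import (
	"fmt"
	"time"
)

func main() {
	busy := make(chan struct{}, 1) // single in-flight update slot
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	doUpdate := func() { time.Sleep(250 * time.Millisecond) } // slower than the tick

	for i := 0; i < 5; i++ {
		<-ticker.C
		select {
		case busy <- struct{}{}: // slot free: start an update in the background
			go func() {
				defer func() { <-busy }() // release the slot when the update finishes
				doUpdate()
			}()
		default: // slot taken: skip this tick rather than blocking the loop
			fmt.Println("updater loop ticker skipped, update in progress")
		}
	}
}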
-146
@@ -1,146 +0,0 @@
package agentsocket
import (
"context"
"golang.org/x/xerrors"
"storj.io/drpc"
"storj.io/drpc/drpcconn"
"github.com/coder/coder/v2/agent/agentsocket/proto"
"github.com/coder/coder/v2/agent/unit"
)
// Option represents a configuration option for NewClient.
type Option func(*options)
type options struct {
path string
}
// WithPath sets the socket path. If not provided or empty, the client will
// auto-discover the default socket path.
func WithPath(path string) Option {
return func(opts *options) {
if path == "" {
return
}
opts.path = path
}
}
// Client provides a client for communicating with the workspace agentsocket API.
type Client struct {
client proto.DRPCAgentSocketClient
conn drpc.Conn
}
// NewClient creates a new socket client and opens a connection to the socket.
// If path is not provided via WithPath or is empty, it will auto-discover the
// default socket path.
func NewClient(ctx context.Context, opts ...Option) (*Client, error) {
options := &options{}
for _, opt := range opts {
opt(options)
}
conn, err := dialSocket(ctx, options.path)
if err != nil {
return nil, xerrors.Errorf("connect to socket: %w", err)
}
drpcConn := drpcconn.New(conn)
client := proto.NewDRPCAgentSocketClient(drpcConn)
return &Client{
client: client,
conn: drpcConn,
}, nil
}
// Close closes the socket connection.
func (c *Client) Close() error {
return c.conn.Close()
}
// Ping sends a ping request to the agent.
func (c *Client) Ping(ctx context.Context) error {
_, err := c.client.Ping(ctx, &proto.PingRequest{})
return err
}
// SyncStart starts a unit in the dependency graph.
func (c *Client) SyncStart(ctx context.Context, unitName unit.ID) error {
_, err := c.client.SyncStart(ctx, &proto.SyncStartRequest{
Unit: string(unitName),
})
return err
}
// SyncWant declares a dependency between units.
func (c *Client) SyncWant(ctx context.Context, unitName, dependsOn unit.ID) error {
_, err := c.client.SyncWant(ctx, &proto.SyncWantRequest{
Unit: string(unitName),
DependsOn: string(dependsOn),
})
return err
}
// SyncComplete marks a unit as complete in the dependency graph.
func (c *Client) SyncComplete(ctx context.Context, unitName unit.ID) error {
_, err := c.client.SyncComplete(ctx, &proto.SyncCompleteRequest{
Unit: string(unitName),
})
return err
}
// SyncReady requests whether a unit is ready to be started. That is, all dependencies are satisfied.
func (c *Client) SyncReady(ctx context.Context, unitName unit.ID) (bool, error) {
resp, err := c.client.SyncReady(ctx, &proto.SyncReadyRequest{
Unit: string(unitName),
})
return resp.Ready, err
}
// SyncStatus gets the status of a unit and its dependencies.
func (c *Client) SyncStatus(ctx context.Context, unitName unit.ID) (SyncStatusResponse, error) {
resp, err := c.client.SyncStatus(ctx, &proto.SyncStatusRequest{
Unit: string(unitName),
})
if err != nil {
return SyncStatusResponse{}, err
}
var dependencies []DependencyInfo
for _, dep := range resp.Dependencies {
dependencies = append(dependencies, DependencyInfo{
DependsOn: unit.ID(dep.DependsOn),
RequiredStatus: unit.Status(dep.RequiredStatus),
CurrentStatus: unit.Status(dep.CurrentStatus),
IsSatisfied: dep.IsSatisfied,
})
}
return SyncStatusResponse{
UnitName: unitName,
Status: unit.Status(resp.Status),
IsReady: resp.IsReady,
Dependencies: dependencies,
}, nil
}
// SyncStatusResponse contains the status information for a unit.
type SyncStatusResponse struct {
UnitName unit.ID `table:"unit,default_sort" json:"unit_name"`
Status unit.Status `table:"status" json:"status"`
IsReady bool `table:"ready" json:"is_ready"`
Dependencies []DependencyInfo `table:"dependencies" json:"dependencies"`
}
// DependencyInfo contains information about a unit dependency.
type DependencyInfo struct {
DependsOn unit.ID `table:"depends on,default_sort" json:"depends_on"`
RequiredStatus unit.Status `table:"required status" json:"required_status"`
CurrentStatus unit.Status `table:"current status" json:"current_status"`
IsSatisfied bool `table:"satisfied" json:"is_satisfied"`
}
-968
@@ -1,968 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v4.23.4
// source: agent/agentsocket/proto/agentsocket.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PingRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *PingRequest) Reset() {
*x = PingRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PingRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PingRequest) ProtoMessage() {}
func (x *PingRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead.
func (*PingRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{0}
}
type PingResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *PingResponse) Reset() {
*x = PingResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PingResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PingResponse) ProtoMessage() {}
func (x *PingResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead.
func (*PingResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{1}
}
type SyncStartRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
}
func (x *SyncStartRequest) Reset() {
*x = SyncStartRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncStartRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncStartRequest) ProtoMessage() {}
func (x *SyncStartRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncStartRequest.ProtoReflect.Descriptor instead.
func (*SyncStartRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{2}
}
func (x *SyncStartRequest) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
type SyncStartResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *SyncStartResponse) Reset() {
*x = SyncStartResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncStartResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncStartResponse) ProtoMessage() {}
func (x *SyncStartResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncStartResponse.ProtoReflect.Descriptor instead.
func (*SyncStartResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{3}
}
type SyncWantRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
}
func (x *SyncWantRequest) Reset() {
*x = SyncWantRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncWantRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncWantRequest) ProtoMessage() {}
func (x *SyncWantRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncWantRequest.ProtoReflect.Descriptor instead.
func (*SyncWantRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{4}
}
func (x *SyncWantRequest) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
func (x *SyncWantRequest) GetDependsOn() string {
if x != nil {
return x.DependsOn
}
return ""
}
type SyncWantResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *SyncWantResponse) Reset() {
*x = SyncWantResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncWantResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncWantResponse) ProtoMessage() {}
func (x *SyncWantResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncWantResponse.ProtoReflect.Descriptor instead.
func (*SyncWantResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{5}
}
type SyncCompleteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
}
func (x *SyncCompleteRequest) Reset() {
*x = SyncCompleteRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncCompleteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncCompleteRequest) ProtoMessage() {}
func (x *SyncCompleteRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncCompleteRequest.ProtoReflect.Descriptor instead.
func (*SyncCompleteRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{6}
}
func (x *SyncCompleteRequest) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
type SyncCompleteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *SyncCompleteResponse) Reset() {
*x = SyncCompleteResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncCompleteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncCompleteResponse) ProtoMessage() {}
func (x *SyncCompleteResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncCompleteResponse.ProtoReflect.Descriptor instead.
func (*SyncCompleteResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{7}
}
type SyncReadyRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
}
func (x *SyncReadyRequest) Reset() {
*x = SyncReadyRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncReadyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncReadyRequest) ProtoMessage() {}
func (x *SyncReadyRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncReadyRequest.ProtoReflect.Descriptor instead.
func (*SyncReadyRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{8}
}
func (x *SyncReadyRequest) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
type SyncReadyResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
}
func (x *SyncReadyResponse) Reset() {
*x = SyncReadyResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncReadyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncReadyResponse) ProtoMessage() {}
func (x *SyncReadyResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncReadyResponse.ProtoReflect.Descriptor instead.
func (*SyncReadyResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{9}
}
func (x *SyncReadyResponse) GetReady() bool {
if x != nil {
return x.Ready
}
return false
}
type SyncStatusRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
}
func (x *SyncStatusRequest) Reset() {
*x = SyncStatusRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncStatusRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncStatusRequest) ProtoMessage() {}
func (x *SyncStatusRequest) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncStatusRequest.ProtoReflect.Descriptor instead.
func (*SyncStatusRequest) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{10}
}
func (x *SyncStatusRequest) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
type DependencyInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
RequiredStatus string `protobuf:"bytes,3,opt,name=required_status,json=requiredStatus,proto3" json:"required_status,omitempty"`
CurrentStatus string `protobuf:"bytes,4,opt,name=current_status,json=currentStatus,proto3" json:"current_status,omitempty"`
IsSatisfied bool `protobuf:"varint,5,opt,name=is_satisfied,json=isSatisfied,proto3" json:"is_satisfied,omitempty"`
}
func (x *DependencyInfo) Reset() {
*x = DependencyInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DependencyInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DependencyInfo) ProtoMessage() {}
func (x *DependencyInfo) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DependencyInfo.ProtoReflect.Descriptor instead.
func (*DependencyInfo) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{11}
}
func (x *DependencyInfo) GetUnit() string {
if x != nil {
return x.Unit
}
return ""
}
func (x *DependencyInfo) GetDependsOn() string {
if x != nil {
return x.DependsOn
}
return ""
}
func (x *DependencyInfo) GetRequiredStatus() string {
if x != nil {
return x.RequiredStatus
}
return ""
}
func (x *DependencyInfo) GetCurrentStatus() string {
if x != nil {
return x.CurrentStatus
}
return ""
}
func (x *DependencyInfo) GetIsSatisfied() bool {
if x != nil {
return x.IsSatisfied
}
return false
}
type SyncStatusResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
IsReady bool `protobuf:"varint,2,opt,name=is_ready,json=isReady,proto3" json:"is_ready,omitempty"`
Dependencies []*DependencyInfo `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"`
}
func (x *SyncStatusResponse) Reset() {
*x = SyncStatusResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SyncStatusResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SyncStatusResponse) ProtoMessage() {}
func (x *SyncStatusResponse) ProtoReflect() protoreflect.Message {
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SyncStatusResponse.ProtoReflect.Descriptor instead.
func (*SyncStatusResponse) Descriptor() ([]byte, []int) {
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{12}
}
func (x *SyncStatusResponse) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *SyncStatusResponse) GetIsReady() bool {
if x != nil {
return x.IsReady
}
return false
}
func (x *SyncStatusResponse) GetDependencies() []*DependencyInfo {
if x != nil {
return x.Dependencies
}
return nil
}
var File_agent_agentsocket_proto_agentsocket_proto protoreflect.FileDescriptor
var file_agent_agentsocket_proto_agentsocket_proto_rawDesc = []byte{
0x0a, 0x29, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73,
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x63, 0x6f, 0x64,
0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76,
0x31, 0x22, 0x0d, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x26, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63,
0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x0a,
0x0f, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f,
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
0x73, 0x4f, 0x6e, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x43,
0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
0x69, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x10, 0x53, 0x79,
0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
0x69, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x22, 0x27, 0x0a,
0x11, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x70, 0x65, 0x6e,
0x64, 0x65, 0x6e, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a,
0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6e, 0x12, 0x27, 0x0a, 0x0f,
0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x53,
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63,
0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c,
0x69, 0x73, 0x5f, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x22,
0x91, 0x01, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19,
0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x70,
0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
0x69, 0x65, 0x73, 0x32, 0xbb, 0x04, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x21, 0x2e, 0x63, 0x6f,
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12,
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x59, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63,
0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e,
0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57,
0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0c, 0x53,
0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x29, 0x2e, 0x63, 0x6f,
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79,
0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12,
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x5f, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27,
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce sync.Once
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = file_agent_agentsocket_proto_agentsocket_proto_rawDesc
)
func file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP() []byte {
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce.Do(func() {
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_agentsocket_proto_agentsocket_proto_rawDescData)
})
return file_agent_agentsocket_proto_agentsocket_proto_rawDescData
}
var file_agent_agentsocket_proto_agentsocket_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_agent_agentsocket_proto_agentsocket_proto_goTypes = []interface{}{
(*PingRequest)(nil), // 0: coder.agentsocket.v1.PingRequest
(*PingResponse)(nil), // 1: coder.agentsocket.v1.PingResponse
(*SyncStartRequest)(nil), // 2: coder.agentsocket.v1.SyncStartRequest
(*SyncStartResponse)(nil), // 3: coder.agentsocket.v1.SyncStartResponse
(*SyncWantRequest)(nil), // 4: coder.agentsocket.v1.SyncWantRequest
(*SyncWantResponse)(nil), // 5: coder.agentsocket.v1.SyncWantResponse
(*SyncCompleteRequest)(nil), // 6: coder.agentsocket.v1.SyncCompleteRequest
(*SyncCompleteResponse)(nil), // 7: coder.agentsocket.v1.SyncCompleteResponse
(*SyncReadyRequest)(nil), // 8: coder.agentsocket.v1.SyncReadyRequest
(*SyncReadyResponse)(nil), // 9: coder.agentsocket.v1.SyncReadyResponse
(*SyncStatusRequest)(nil), // 10: coder.agentsocket.v1.SyncStatusRequest
(*DependencyInfo)(nil), // 11: coder.agentsocket.v1.DependencyInfo
(*SyncStatusResponse)(nil), // 12: coder.agentsocket.v1.SyncStatusResponse
}
var file_agent_agentsocket_proto_agentsocket_proto_depIdxs = []int32{
11, // 0: coder.agentsocket.v1.SyncStatusResponse.dependencies:type_name -> coder.agentsocket.v1.DependencyInfo
0, // 1: coder.agentsocket.v1.AgentSocket.Ping:input_type -> coder.agentsocket.v1.PingRequest
2, // 2: coder.agentsocket.v1.AgentSocket.SyncStart:input_type -> coder.agentsocket.v1.SyncStartRequest
4, // 3: coder.agentsocket.v1.AgentSocket.SyncWant:input_type -> coder.agentsocket.v1.SyncWantRequest
6, // 4: coder.agentsocket.v1.AgentSocket.SyncComplete:input_type -> coder.agentsocket.v1.SyncCompleteRequest
8, // 5: coder.agentsocket.v1.AgentSocket.SyncReady:input_type -> coder.agentsocket.v1.SyncReadyRequest
10, // 6: coder.agentsocket.v1.AgentSocket.SyncStatus:input_type -> coder.agentsocket.v1.SyncStatusRequest
1, // 7: coder.agentsocket.v1.AgentSocket.Ping:output_type -> coder.agentsocket.v1.PingResponse
3, // 8: coder.agentsocket.v1.AgentSocket.SyncStart:output_type -> coder.agentsocket.v1.SyncStartResponse
5, // 9: coder.agentsocket.v1.AgentSocket.SyncWant:output_type -> coder.agentsocket.v1.SyncWantResponse
7, // 10: coder.agentsocket.v1.AgentSocket.SyncComplete:output_type -> coder.agentsocket.v1.SyncCompleteResponse
9, // 11: coder.agentsocket.v1.AgentSocket.SyncReady:output_type -> coder.agentsocket.v1.SyncReadyResponse
12, // 12: coder.agentsocket.v1.AgentSocket.SyncStatus:output_type -> coder.agentsocket.v1.SyncStatusResponse
7, // [7:13] is the sub-list for method output_type
1, // [1:7] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_agent_agentsocket_proto_agentsocket_proto_init() }
func file_agent_agentsocket_proto_agentsocket_proto_init() {
if File_agent_agentsocket_proto_agentsocket_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncStartRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncStartResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncWantRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncWantResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncCompleteRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncCompleteResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncReadyRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncReadyResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncStatusRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DependencyInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SyncStatusResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_agent_agentsocket_proto_agentsocket_proto_rawDesc,
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_agent_agentsocket_proto_agentsocket_proto_goTypes,
DependencyIndexes: file_agent_agentsocket_proto_agentsocket_proto_depIdxs,
MessageInfos: file_agent_agentsocket_proto_agentsocket_proto_msgTypes,
}.Build()
File_agent_agentsocket_proto_agentsocket_proto = out.File
file_agent_agentsocket_proto_agentsocket_proto_rawDesc = nil
file_agent_agentsocket_proto_agentsocket_proto_goTypes = nil
file_agent_agentsocket_proto_agentsocket_proto_depIdxs = nil
}
-69
@@ -1,69 +0,0 @@
syntax = "proto3";
option go_package = "github.com/coder/coder/v2/agent/agentsocket/proto";
package coder.agentsocket.v1;
message PingRequest {}
message PingResponse {}
message SyncStartRequest {
string unit = 1;
}
message SyncStartResponse {}
message SyncWantRequest {
string unit = 1;
string depends_on = 2;
}
message SyncWantResponse {}
message SyncCompleteRequest {
string unit = 1;
}
message SyncCompleteResponse {}
message SyncReadyRequest {
string unit = 1;
}
message SyncReadyResponse {
bool ready = 1;
}
message SyncStatusRequest {
string unit = 1;
}
message DependencyInfo {
string unit = 1;
string depends_on = 2;
string required_status = 3;
string current_status = 4;
bool is_satisfied = 5;
}
message SyncStatusResponse {
string status = 1;
bool is_ready = 2;
repeated DependencyInfo dependencies = 3;
}
// AgentSocket provides direct access to the agent over local IPC.
service AgentSocket {
// Ping the agent to check if it is alive.
rpc Ping(PingRequest) returns (PingResponse);
// Report the start of a unit.
rpc SyncStart(SyncStartRequest) returns (SyncStartResponse);
// Declare a dependency between units.
rpc SyncWant(SyncWantRequest) returns (SyncWantResponse);
// Report the completion of a unit.
rpc SyncComplete(SyncCompleteRequest) returns (SyncCompleteResponse);
// Report whether a unit is ready to be started, i.e. whether all of its dependencies are satisfied.
rpc SyncReady(SyncReadyRequest) returns (SyncReadyResponse);
// Get the status of a unit and list its dependencies.
rpc SyncStatus(SyncStatusRequest) returns (SyncStatusResponse);
}
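For reference, a minimal sketch of driving this service over the agent's Unix socket with the generated DRPC bindings; the socket path and unit name are illustrative assumptions, and error handling is simplified.

// Hypothetical client sketch: dial the agent's Unix socket, wrap it in a DRPC
// connection, and call Ping and SyncStart via the generated client. The socket
// path and unit name are assumptions for illustration only.
package example

import (
	"context"
	"net"

	"storj.io/drpc/drpcconn"

	"github.com/coder/coder/v2/agent/agentsocket/proto"
)

func pingAndStart(ctx context.Context) error {
	// Dial the Unix domain socket the agent listens on (path is illustrative).
	netConn, err := net.Dial("unix", "/tmp/coder-agent.sock")
	if err != nil {
		return err
	}
	// Wrap the raw connection for DRPC and build the generated client.
	conn := drpcconn.New(netConn)
	defer conn.Close()

	client := proto.NewDRPCAgentSocketClient(conn)
	if _, err := client.Ping(ctx, &proto.PingRequest{}); err != nil {
		return err
	}
	_, err = client.SyncStart(ctx, &proto.SyncStartRequest{Unit: "my-unit"})
	return err
}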
@@ -1,311 +0,0 @@
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
// protoc-gen-go-drpc version: v0.0.34
// source: agent/agentsocket/proto/agentsocket.proto
package proto
import (
context "context"
errors "errors"
protojson "google.golang.org/protobuf/encoding/protojson"
proto "google.golang.org/protobuf/proto"
drpc "storj.io/drpc"
drpcerr "storj.io/drpc/drpcerr"
)
type drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto struct{}
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) Marshal(msg drpc.Message) ([]byte, error) {
return proto.Marshal(msg.(proto.Message))
}
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
return proto.MarshalOptions{}.MarshalAppend(buf, msg.(proto.Message))
}
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) Unmarshal(buf []byte, msg drpc.Message) error {
return proto.Unmarshal(buf, msg.(proto.Message))
}
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
return protojson.Marshal(msg.(proto.Message))
}
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
return protojson.Unmarshal(buf, msg.(proto.Message))
}
type DRPCAgentSocketClient interface {
DRPCConn() drpc.Conn
Ping(ctx context.Context, in *PingRequest) (*PingResponse, error)
SyncStart(ctx context.Context, in *SyncStartRequest) (*SyncStartResponse, error)
SyncWant(ctx context.Context, in *SyncWantRequest) (*SyncWantResponse, error)
SyncComplete(ctx context.Context, in *SyncCompleteRequest) (*SyncCompleteResponse, error)
SyncReady(ctx context.Context, in *SyncReadyRequest) (*SyncReadyResponse, error)
SyncStatus(ctx context.Context, in *SyncStatusRequest) (*SyncStatusResponse, error)
}
type drpcAgentSocketClient struct {
cc drpc.Conn
}
func NewDRPCAgentSocketClient(cc drpc.Conn) DRPCAgentSocketClient {
return &drpcAgentSocketClient{cc}
}
func (c *drpcAgentSocketClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcAgentSocketClient) Ping(ctx context.Context, in *PingRequest) (*PingResponse, error) {
out := new(PingResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/Ping", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcAgentSocketClient) SyncStart(ctx context.Context, in *SyncStartRequest) (*SyncStartResponse, error) {
out := new(SyncStartResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncStart", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcAgentSocketClient) SyncWant(ctx context.Context, in *SyncWantRequest) (*SyncWantResponse, error) {
out := new(SyncWantResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncWant", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcAgentSocketClient) SyncComplete(ctx context.Context, in *SyncCompleteRequest) (*SyncCompleteResponse, error) {
out := new(SyncCompleteResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncComplete", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcAgentSocketClient) SyncReady(ctx context.Context, in *SyncReadyRequest) (*SyncReadyResponse, error) {
out := new(SyncReadyResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncReady", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcAgentSocketClient) SyncStatus(ctx context.Context, in *SyncStatusRequest) (*SyncStatusResponse, error) {
out := new(SyncStatusResponse)
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncStatus", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCAgentSocketServer interface {
Ping(context.Context, *PingRequest) (*PingResponse, error)
SyncStart(context.Context, *SyncStartRequest) (*SyncStartResponse, error)
SyncWant(context.Context, *SyncWantRequest) (*SyncWantResponse, error)
SyncComplete(context.Context, *SyncCompleteRequest) (*SyncCompleteResponse, error)
SyncReady(context.Context, *SyncReadyRequest) (*SyncReadyResponse, error)
SyncStatus(context.Context, *SyncStatusRequest) (*SyncStatusResponse, error)
}
type DRPCAgentSocketUnimplementedServer struct{}
func (s *DRPCAgentSocketUnimplementedServer) Ping(context.Context, *PingRequest) (*PingResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
func (s *DRPCAgentSocketUnimplementedServer) SyncStart(context.Context, *SyncStartRequest) (*SyncStartResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
func (s *DRPCAgentSocketUnimplementedServer) SyncWant(context.Context, *SyncWantRequest) (*SyncWantResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
func (s *DRPCAgentSocketUnimplementedServer) SyncComplete(context.Context, *SyncCompleteRequest) (*SyncCompleteResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
func (s *DRPCAgentSocketUnimplementedServer) SyncReady(context.Context, *SyncReadyRequest) (*SyncReadyResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
func (s *DRPCAgentSocketUnimplementedServer) SyncStatus(context.Context, *SyncStatusRequest) (*SyncStatusResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
type DRPCAgentSocketDescription struct{}
func (DRPCAgentSocketDescription) NumMethods() int { return 6 }
func (DRPCAgentSocketDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/coder.agentsocket.v1.AgentSocket/Ping", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
Ping(
ctx,
in1.(*PingRequest),
)
}, DRPCAgentSocketServer.Ping, true
case 1:
return "/coder.agentsocket.v1.AgentSocket/SyncStart", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
SyncStart(
ctx,
in1.(*SyncStartRequest),
)
}, DRPCAgentSocketServer.SyncStart, true
case 2:
return "/coder.agentsocket.v1.AgentSocket/SyncWant", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
SyncWant(
ctx,
in1.(*SyncWantRequest),
)
}, DRPCAgentSocketServer.SyncWant, true
case 3:
return "/coder.agentsocket.v1.AgentSocket/SyncComplete", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
SyncComplete(
ctx,
in1.(*SyncCompleteRequest),
)
}, DRPCAgentSocketServer.SyncComplete, true
case 4:
return "/coder.agentsocket.v1.AgentSocket/SyncReady", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
SyncReady(
ctx,
in1.(*SyncReadyRequest),
)
}, DRPCAgentSocketServer.SyncReady, true
case 5:
return "/coder.agentsocket.v1.AgentSocket/SyncStatus", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAgentSocketServer).
SyncStatus(
ctx,
in1.(*SyncStatusRequest),
)
}, DRPCAgentSocketServer.SyncStatus, true
default:
return "", nil, nil, nil, false
}
}
func DRPCRegisterAgentSocket(mux drpc.Mux, impl DRPCAgentSocketServer) error {
return mux.Register(impl, DRPCAgentSocketDescription{})
}
type DRPCAgentSocket_PingStream interface {
drpc.Stream
SendAndClose(*PingResponse) error
}
type drpcAgentSocket_PingStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_PingStream) SendAndClose(m *PingResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
type DRPCAgentSocket_SyncStartStream interface {
drpc.Stream
SendAndClose(*SyncStartResponse) error
}
type drpcAgentSocket_SyncStartStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_SyncStartStream) SendAndClose(m *SyncStartResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
type DRPCAgentSocket_SyncWantStream interface {
drpc.Stream
SendAndClose(*SyncWantResponse) error
}
type drpcAgentSocket_SyncWantStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_SyncWantStream) SendAndClose(m *SyncWantResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
type DRPCAgentSocket_SyncCompleteStream interface {
drpc.Stream
SendAndClose(*SyncCompleteResponse) error
}
type drpcAgentSocket_SyncCompleteStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_SyncCompleteStream) SendAndClose(m *SyncCompleteResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
type DRPCAgentSocket_SyncReadyStream interface {
drpc.Stream
SendAndClose(*SyncReadyResponse) error
}
type drpcAgentSocket_SyncReadyStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_SyncReadyStream) SendAndClose(m *SyncReadyResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
type DRPCAgentSocket_SyncStatusStream interface {
drpc.Stream
SendAndClose(*SyncStatusResponse) error
}
type drpcAgentSocket_SyncStatusStream struct {
drpc.Stream
}
func (x *drpcAgentSocket_SyncStatusStream) SendAndClose(m *SyncStatusResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
return err
}
return x.CloseSend()
}
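The generated description also makes partial implementations straightforward: embed the Unimplemented server and override only the methods you need. A hypothetical sketch (listener setup assumed to exist elsewhere):

// Hypothetical sketch: a partial AgentSocket implementation that embeds the
// generated Unimplemented server so only Ping is overridden, mounted on a
// DRPC mux and served over an existing listener.
package example

import (
	"context"
	"net"

	"storj.io/drpc/drpcmux"
	"storj.io/drpc/drpcserver"

	"github.com/coder/coder/v2/agent/agentsocket/proto"
)

type pingOnlyServer struct {
	proto.DRPCAgentSocketUnimplementedServer
}

func (*pingOnlyServer) Ping(context.Context, *proto.PingRequest) (*proto.PingResponse, error) {
	return &proto.PingResponse{}, nil
}

func serve(ctx context.Context, lis net.Listener) error {
	mux := drpcmux.New()
	// All RPCs other than Ping fall through to the Unimplemented responses.
	if err := proto.DRPCRegisterAgentSocket(mux, &pingOnlyServer{}); err != nil {
		return err
	}
	return drpcserver.New(mux).Serve(ctx, lis)
}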
-17
@@ -1,17 +0,0 @@
package proto
import "github.com/coder/coder/v2/apiversion"
// Version history:
//
// API v1.0:
// - Initial release
// - Ping
// - Sync operations: SyncStart, SyncWant, SyncComplete, SyncReady, SyncStatus
const (
CurrentMajor = 1
CurrentMinor = 0
)
var CurrentVersion = apiversion.New(CurrentMajor, CurrentMinor)
-138
@@ -1,138 +0,0 @@
package agentsocket
import (
"context"
"errors"
"net"
"sync"
"golang.org/x/xerrors"
"storj.io/drpc/drpcmux"
"storj.io/drpc/drpcserver"
"cdr.dev/slog"
"github.com/coder/coder/v2/agent/agentsocket/proto"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/codersdk/drpcsdk"
)
// Server provides access to the DRPCAgentSocketService via a Unix domain socket.
// Do not construct Server{} directly. Use NewServer() instead.
type Server struct {
logger slog.Logger
path string
drpcServer *drpcserver.Server
service *DRPCAgentSocketService
mu sync.Mutex
listener net.Listener
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
}
// NewServer creates a new agent socket server.
func NewServer(logger slog.Logger, opts ...Option) (*Server, error) {
options := &options{}
for _, opt := range opts {
opt(options)
}
logger = logger.Named("agentsocket-server")
server := &Server{
logger: logger,
path: options.path,
service: &DRPCAgentSocketService{
logger: logger,
unitManager: unit.NewManager(),
},
}
mux := drpcmux.New()
err := proto.DRPCRegisterAgentSocket(mux, server.service)
if err != nil {
return nil, xerrors.Errorf("failed to register drpc service: %w", err)
}
server.drpcServer = drpcserver.NewWithOptions(mux, drpcserver.Options{
Manager: drpcsdk.DefaultDRPCOptions(nil),
Log: func(err error) {
if errors.Is(err, context.Canceled) ||
errors.Is(err, context.DeadlineExceeded) {
return
}
logger.Debug(context.Background(), "drpc server error", slog.Error(err))
},
})
listener, err := createSocket(server.path)
if err != nil {
return nil, xerrors.Errorf("create socket: %w", err)
}
server.listener = listener
// This context is canceled by server.Close().
// Canceling it will close all connections.
server.ctx, server.cancel = context.WithCancel(context.Background())
server.logger.Info(server.ctx, "agent socket server started", slog.F("path", server.path))
server.wg.Add(1)
go func() {
defer server.wg.Done()
server.acceptConnections()
}()
return server, nil
}
// Close stops the server and cleans up resources.
func (s *Server) Close() error {
s.mu.Lock()
if s.listener == nil {
s.mu.Unlock()
return nil
}
s.logger.Info(s.ctx, "stopping agent socket server")
s.cancel()
if err := s.listener.Close(); err != nil {
s.logger.Warn(s.ctx, "error closing socket listener", slog.Error(err))
}
s.listener = nil
s.mu.Unlock()
// Wait for all connections to finish
s.wg.Wait()
if err := cleanupSocket(s.path); err != nil {
s.logger.Warn(s.ctx, "error cleaning up socket file", slog.Error(err))
}
s.logger.Info(s.ctx, "agent socket server stopped")
return nil
}
func (s *Server) acceptConnections() {
// In an edge case, Close() might race with acceptConnections() and set s.listener to nil.
// Therefore, we grab a copy of the listener under a lock. We might still get a nil listener,
// but then we know close has already run and we can return early.
s.mu.Lock()
listener := s.listener
s.mu.Unlock()
if listener == nil {
return
}
err := s.drpcServer.Serve(s.ctx, listener)
if err != nil {
s.logger.Warn(s.ctx, "error serving drpc server", slog.Error(err))
}
}
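End to end, the server pairs with the agentsocket client used in the tests below; a minimal lifecycle sketch, with an illustrative socket path:

// Hypothetical lifecycle sketch, mirroring how the tests below construct the
// server and client. The socket path is illustrative.
package example

import (
	"context"

	"cdr.dev/slog"

	"github.com/coder/coder/v2/agent/agentsocket"
)

func pingOverSocket(ctx context.Context) error {
	logger := slog.Make().Leveled(slog.LevelDebug)

	// Start listening on a Unix domain socket.
	server, err := agentsocket.NewServer(logger, agentsocket.WithPath("/tmp/agent.sock"))
	if err != nil {
		return err
	}
	defer server.Close()

	// Connect a client over the same socket and verify the server is alive.
	client, err := agentsocket.NewClient(ctx, agentsocket.WithPath("/tmp/agent.sock"))
	if err != nil {
		return err
	}
	defer client.Close()
	return client.Ping(ctx)
}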
-138
@@ -1,138 +0,0 @@
package agentsocket_test
import (
"context"
"path/filepath"
"runtime"
"testing"
"github.com/google/uuid"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"github.com/coder/coder/v2/agent"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/agenttest"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/tailnettest"
"github.com/coder/coder/v2/testutil"
)
func TestServer(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("agentsocket is not supported on Windows")
}
t.Run("StartStop", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
require.NoError(t, server.Close())
})
t.Run("AlreadyStarted", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server1, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
defer server1.Close()
_, err = agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.ErrorContains(t, err, "create socket")
})
t.Run("AutoSocketPath", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
require.NoError(t, server.Close())
})
}
func TestServerWindowsNotSupported(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" {
t.Skip("this test only runs on Windows")
}
t.Run("NewServer", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
_, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
})
t.Run("NewClient", func(t *testing.T) {
t.Parallel()
_, err := agentsocket.NewClient(context.Background(), agentsocket.WithPath("test.sock"))
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
})
}
func TestAgentInitializesOnWindowsWithoutSocketServer(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" {
t.Skip("this test only runs on Windows")
}
ctx := testutil.Context(t, testutil.WaitShort)
logger := testutil.Logger(t).Named("agent")
derpMap, _ := tailnettest.RunDERPAndSTUN(t)
coordinator := tailnet.NewCoordinator(logger)
t.Cleanup(func() {
_ = coordinator.Close()
})
statsCh := make(chan *agentproto.Stats, 50)
agentID := uuid.New()
manifest := agentsdk.Manifest{
AgentID: agentID,
AgentName: "test-agent",
WorkspaceName: "test-workspace",
OwnerName: "test-user",
WorkspaceID: uuid.New(),
DERPMap: derpMap,
}
client := agenttest.NewClient(t, logger.Named("agenttest"), agentID, manifest, statsCh, coordinator)
t.Cleanup(client.Close)
options := agent.Options{
Client: client,
Filesystem: afero.NewMemMapFs(),
Logger: logger.Named("agent"),
ReconnectingPTYTimeout: testutil.WaitShort,
EnvironmentVariables: map[string]string{},
SocketPath: "",
}
agnt := agent.New(options)
t.Cleanup(func() {
_ = agnt.Close()
})
startup := testutil.TryReceive(ctx, t, client.GetStartup())
require.NotNil(t, startup, "agent should send startup message")
err := agnt.Close()
require.NoError(t, err, "agent should close cleanly")
}
-152
@@ -1,152 +0,0 @@
package agentsocket
import (
"context"
"errors"
"golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/agent/agentsocket/proto"
"github.com/coder/coder/v2/agent/unit"
)
var _ proto.DRPCAgentSocketServer = (*DRPCAgentSocketService)(nil)
var ErrUnitManagerNotAvailable = xerrors.New("unit manager not available")
// DRPCAgentSocketService implements the DRPC agent socket service.
type DRPCAgentSocketService struct {
unitManager *unit.Manager
logger slog.Logger
}
// Ping responds to a ping request to check if the service is alive.
func (*DRPCAgentSocketService) Ping(_ context.Context, _ *proto.PingRequest) (*proto.PingResponse, error) {
return &proto.PingResponse{}, nil
}
// SyncStart starts a unit in the dependency graph.
func (s *DRPCAgentSocketService) SyncStart(_ context.Context, req *proto.SyncStartRequest) (*proto.SyncStartResponse, error) {
if s.unitManager == nil {
return nil, xerrors.Errorf("SyncStart: %w", ErrUnitManagerNotAvailable)
}
unitID := unit.ID(req.Unit)
if err := s.unitManager.Register(unitID); err != nil {
if !errors.Is(err, unit.ErrUnitAlreadyRegistered) {
return nil, xerrors.Errorf("SyncStart: %w", err)
}
}
isReady, err := s.unitManager.IsReady(unitID)
if err != nil {
return nil, xerrors.Errorf("cannot check readiness: %w", err)
}
if !isReady {
return nil, xerrors.Errorf("cannot start unit %q: unit not ready", req.Unit)
}
err = s.unitManager.UpdateStatus(unitID, unit.StatusStarted)
if err != nil {
return nil, xerrors.Errorf("cannot start unit %q: %w", req.Unit, err)
}
return &proto.SyncStartResponse{}, nil
}
// SyncWant declares a dependency between units.
func (s *DRPCAgentSocketService) SyncWant(_ context.Context, req *proto.SyncWantRequest) (*proto.SyncWantResponse, error) {
if s.unitManager == nil {
return nil, xerrors.Errorf("cannot add dependency: %w", ErrUnitManagerNotAvailable)
}
unitID := unit.ID(req.Unit)
dependsOnID := unit.ID(req.DependsOn)
if err := s.unitManager.Register(unitID); err != nil && !errors.Is(err, unit.ErrUnitAlreadyRegistered) {
return nil, xerrors.Errorf("cannot add dependency: %w", err)
}
if err := s.unitManager.AddDependency(unitID, dependsOnID, unit.StatusComplete); err != nil {
return nil, xerrors.Errorf("cannot add dependency: %w", err)
}
return &proto.SyncWantResponse{}, nil
}
// SyncComplete marks a unit as complete in the dependency graph.
func (s *DRPCAgentSocketService) SyncComplete(_ context.Context, req *proto.SyncCompleteRequest) (*proto.SyncCompleteResponse, error) {
if s.unitManager == nil {
return nil, xerrors.Errorf("cannot complete unit: %w", ErrUnitManagerNotAvailable)
}
unitID := unit.ID(req.Unit)
if err := s.unitManager.UpdateStatus(unitID, unit.StatusComplete); err != nil {
return nil, xerrors.Errorf("cannot complete unit %q: %w", req.Unit, err)
}
return &proto.SyncCompleteResponse{}, nil
}
// SyncReady reports whether a unit is ready to be started, i.e. whether all of its dependencies are satisfied.
func (s *DRPCAgentSocketService) SyncReady(_ context.Context, req *proto.SyncReadyRequest) (*proto.SyncReadyResponse, error) {
if s.unitManager == nil {
return nil, xerrors.Errorf("cannot check readiness: %w", ErrUnitManagerNotAvailable)
}
unitID := unit.ID(req.Unit)
isReady, err := s.unitManager.IsReady(unitID)
if err != nil {
return nil, xerrors.Errorf("cannot check readiness: %w", err)
}
return &proto.SyncReadyResponse{
Ready: isReady,
}, nil
}
// SyncStatus gets the status of a unit and lists its dependencies.
func (s *DRPCAgentSocketService) SyncStatus(_ context.Context, req *proto.SyncStatusRequest) (*proto.SyncStatusResponse, error) {
if s.unitManager == nil {
return nil, xerrors.Errorf("cannot get status for unit %q: %w", req.Unit, ErrUnitManagerNotAvailable)
}
unitID := unit.ID(req.Unit)
isReady, err := s.unitManager.IsReady(unitID)
if err != nil {
return nil, xerrors.Errorf("cannot check readiness: %w", err)
}
dependencies, err := s.unitManager.GetAllDependencies(unitID)
switch {
case errors.Is(err, unit.ErrUnitNotFound):
dependencies = []unit.Dependency{}
case err != nil:
return nil, xerrors.Errorf("cannot get dependencies: %w", err)
}
var depInfos []*proto.DependencyInfo
for _, dep := range dependencies {
depInfos = append(depInfos, &proto.DependencyInfo{
Unit: string(dep.Unit),
DependsOn: string(dep.DependsOn),
RequiredStatus: string(dep.RequiredStatus),
CurrentStatus: string(dep.CurrentStatus),
IsSatisfied: dep.IsSatisfied,
})
}
u, err := s.unitManager.Unit(unitID)
if err != nil {
return nil, xerrors.Errorf("cannot get status for unit %q: %w", req.Unit, err)
}
return &proto.SyncStatusResponse{
Status: string(u.Status()),
IsReady: isReady,
Dependencies: depInfos,
}, nil
}
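Putting the sync RPCs together, a sketch of the intended workflow between a dependent unit and its dependency, matching the behaviors exercised by the tests below; unit names and the socket path are illustrative.

// Hypothetical sketch of the dependency-sync workflow between two units,
// using the client helpers exercised in the tests below. Unit names and the
// socket path are illustrative.
package example

import (
	"context"

	"github.com/coder/coder/v2/agent/agentsocket"
)

func syncWorkflow(ctx context.Context, socketPath string) error {
	client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(socketPath))
	if err != nil {
		return err
	}
	defer client.Close()

	// Declare that "app" must wait for "db" to complete.
	if err := client.SyncWant(ctx, "app", "db"); err != nil {
		return err
	}

	// "app" is not ready yet; its dependency has not completed.
	ready, err := client.SyncReady(ctx, "app")
	if err != nil {
		return err
	}
	_ = ready // false until "db" completes

	// Run the dependency to completion.
	if err := client.SyncStart(ctx, "db"); err != nil {
		return err
	}
	if err := client.SyncComplete(ctx, "db"); err != nil {
		return err
	}

	// The dependent unit can now start.
	return client.SyncStart(ctx, "app")
}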
-389
@@ -1,389 +0,0 @@
package agentsocket_test
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/testutil"
)
// tempDirUnixSocket returns a temporary directory that can safely hold unix
// sockets (probably).
//
// During tests on darwin we hit the max path length limit for unix sockets
// pretty easily in the default location, so this function uses /tmp instead to
// get shorter paths. To keep paths short, we use a hash of the test name
// instead of the full test name.
func tempDirUnixSocket(t *testing.T) string {
t.Helper()
if runtime.GOOS == "darwin" {
// Use a short hash of the test name to keep the path under 104 chars
hash := sha256.Sum256([]byte(t.Name()))
hashStr := hex.EncodeToString(hash[:])[:8] // Use first 8 chars of hash
dir, err := os.MkdirTemp("/tmp", fmt.Sprintf("c-%s-", hashStr))
require.NoError(t, err, "create temp dir for unix socket test")
t.Cleanup(func() {
err := os.RemoveAll(dir)
assert.NoError(t, err, "remove temp dir", dir)
})
return dir
}
return t.TempDir()
}
// newSocketClient creates a DRPC client connected to the Unix socket at the given path.
func newSocketClient(ctx context.Context, t *testing.T, socketPath string) *agentsocket.Client {
t.Helper()
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(socketPath))
t.Cleanup(func() {
_ = client.Close()
})
require.NoError(t, err)
return client
}
func TestDRPCAgentSocketService(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("agentsocket is not supported on Windows")
}
t.Run("Ping", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
err = client.Ping(ctx)
require.NoError(t, err)
})
t.Run("SyncStart", func(t *testing.T) {
t.Parallel()
t.Run("NewUnit", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
})
t.Run("UnitAlreadyStarted", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// First Start
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
// Second Start
err = client.SyncStart(ctx, "test-unit")
require.ErrorContains(t, err, unit.ErrSameStatusAlreadySet.Error())
status, err = client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
})
t.Run("UnitAlreadyCompleted", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// First start
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
// Complete the unit
err = client.SyncComplete(ctx, "test-unit")
require.NoError(t, err)
status, err = client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusComplete, status.Status)
// Second start
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
status, err = client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
})
t.Run("UnitNotReady", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
err = client.SyncWant(ctx, "test-unit", "dependency-unit")
require.NoError(t, err)
err = client.SyncStart(ctx, "test-unit")
require.ErrorContains(t, err, "unit not ready")
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusPending, status.Status)
require.False(t, status.IsReady)
})
})
t.Run("SyncWant", func(t *testing.T) {
t.Parallel()
t.Run("NewUnits", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// If dependency units are not registered, they are registered automatically
err = client.SyncWant(ctx, "test-unit", "dependency-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Len(t, status.Dependencies, 1)
require.Equal(t, unit.ID("dependency-unit"), status.Dependencies[0].DependsOn)
require.Equal(t, unit.StatusComplete, status.Dependencies[0].RequiredStatus)
})
t.Run("DependencyAlreadyRegistered", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// Start the dependency unit
err = client.SyncStart(ctx, "dependency-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "dependency-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
// Add the dependency after the dependency unit has already started
err = client.SyncWant(ctx, "test-unit", "dependency-unit")
// Dependencies can be added even if the dependency unit has already started
require.NoError(t, err)
// The dependency is now reflected in the test unit's status
status, err = client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.ID("dependency-unit"), status.Dependencies[0].DependsOn)
require.Equal(t, unit.StatusComplete, status.Dependencies[0].RequiredStatus)
})
t.Run("DependencyAddedAfterDependentStarted", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// Start the dependent unit
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
status, err := client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.StatusStarted, status.Status)
// Add the dependency after the dependent unit has already started
err = client.SyncWant(ctx, "test-unit", "dependency-unit")
// Dependencies can be added even if the dependent unit has already started.
// The dependency applies the next time a unit is started. The current status is not updated.
// This is to allow flexible dependency management. It does mean that users of this API should
// take care to add dependencies before they start their dependent units.
require.NoError(t, err)
// The dependency is now reflected in the test unit's status
status, err = client.SyncStatus(ctx, "test-unit")
require.NoError(t, err)
require.Equal(t, unit.ID("dependency-unit"), status.Dependencies[0].DependsOn)
require.Equal(t, unit.StatusComplete, status.Dependencies[0].RequiredStatus)
})
})
t.Run("SyncReady", func(t *testing.T) {
t.Parallel()
t.Run("UnregisteredUnit", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
ready, err := client.SyncReady(ctx, "unregistered-unit")
require.NoError(t, err)
require.True(t, ready)
})
t.Run("UnitNotReady", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// Register a unit with an unsatisfied dependency
err = client.SyncWant(ctx, "test-unit", "dependency-unit")
require.NoError(t, err)
// Check readiness - should be false because dependency is not satisfied
ready, err := client.SyncReady(ctx, "test-unit")
require.NoError(t, err)
require.False(t, ready)
})
t.Run("UnitReady", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err)
defer server.Close()
client := newSocketClient(ctx, t, socketPath)
// Register a unit with no dependencies - should be ready immediately
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
// Check readiness - should be true
ready, err := client.SyncReady(ctx, "test-unit")
require.NoError(t, err)
require.True(t, ready)
// Also test a unit with satisfied dependencies
err = client.SyncWant(ctx, "dependent-unit", "test-unit")
require.NoError(t, err)
// Complete the dependency
err = client.SyncComplete(ctx, "test-unit")
require.NoError(t, err)
// Now dependent-unit should be ready
ready, err = client.SyncReady(ctx, "dependent-unit")
require.NoError(t, err)
require.True(t, ready)
})
})
}
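Taken together, the tests above pin down the intended call order for the socket sync API: declare dependencies with SyncWant before starting the dependent unit, drive units through SyncStart and SyncComplete, and gate on SyncReady. A minimal sketch of that flow follows; the SyncClient interface is an assumption standing in for the real client type (the tests construct theirs via a test-only newSocketClient helper), so treat this as illustrative rather than as the package's API.

package main

import (
	"context"
	"fmt"
)

// SyncClient is a hypothetical interface matching the methods exercised by the
// tests above; the concrete client type is not shown in this diff.
type SyncClient interface {
	SyncWant(ctx context.Context, unit, dependsOn string) error
	SyncStart(ctx context.Context, unit string) error
	SyncComplete(ctx context.Context, unit string) error
	SyncReady(ctx context.Context, unit string) (bool, error)
}

// orderStartup declares the dependency before starting the dependent unit,
// since dependencies added after a start only apply to the next start.
func orderStartup(ctx context.Context, c SyncClient) error {
	if err := c.SyncWant(ctx, "app", "database"); err != nil {
		return err
	}
	if err := c.SyncStart(ctx, "database"); err != nil {
		return err
	}
	if err := c.SyncComplete(ctx, "database"); err != nil {
		return err
	}
	ready, err := c.SyncReady(ctx, "app")
	if err != nil {
		return err
	}
	if !ready {
		return fmt.Errorf("app is not ready to start")
	}
	return c.SyncStart(ctx, "app")
}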
-73
View File
@@ -1,73 +0,0 @@
//go:build !windows
package agentsocket
import (
"context"
"net"
"os"
"path/filepath"
"time"
"golang.org/x/xerrors"
)
const defaultSocketPath = "/tmp/coder-agent.sock"
func createSocket(path string) (net.Listener, error) {
if path == "" {
path = defaultSocketPath
}
if !isSocketAvailable(path) {
return nil, xerrors.Errorf("socket path %s is not available", path)
}
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
return nil, xerrors.Errorf("remove existing socket: %w", err)
}
parentDir := filepath.Dir(path)
if err := os.MkdirAll(parentDir, 0o700); err != nil {
return nil, xerrors.Errorf("create socket directory: %w", err)
}
listener, err := net.Listen("unix", path)
if err != nil {
return nil, xerrors.Errorf("listen on unix socket: %w", err)
}
if err := os.Chmod(path, 0o600); err != nil {
_ = listener.Close()
return nil, xerrors.Errorf("set socket permissions: %w", err)
}
return listener, nil
}
func cleanupSocket(path string) error {
return os.Remove(path)
}
func isSocketAvailable(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return true
}
// Try to connect to see if it's actually listening.
dialer := net.Dialer{Timeout: 10 * time.Second}
conn, err := dialer.Dial("unix", path)
if err != nil {
return true
}
_ = conn.Close()
return false
}
func dialSocket(ctx context.Context, path string) (net.Conn, error) {
if path == "" {
path = defaultSocketPath
}
dialer := net.Dialer{}
return dialer.DialContext(ctx, "unix", path)
}
-22
View File
@@ -1,22 +0,0 @@
//go:build windows
package agentsocket
import (
"context"
"net"
"golang.org/x/xerrors"
)
func createSocket(_ string) (net.Listener, error) {
return nil, xerrors.New("agentsocket is not supported on Windows")
}
func cleanupSocket(_ string) error {
return nil
}
func dialSocket(_ context.Context, _ string) (net.Conn, error) {
return nil, xerrors.New("agentsocket is not supported on Windows")
}
+31 -33
View File
@@ -2,31 +2,41 @@ package agent
import (
"net/http"
"sync"
"time"
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/workspacesdk"
"github.com/coder/coder/v2/httpmw"
)
func (a *agent) apiHandler() http.Handler {
r := chi.NewRouter()
r.Use(
httpmw.Recover(a.logger),
tracing.StatusWriterMiddleware,
loggermw.Logger(a.logger),
)
r.Get("/", func(rw http.ResponseWriter, r *http.Request) {
httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{
Message: "Hello from the agent!",
})
})
// Make a copy to ensure the map is not modified after the handler is
// created.
cpy := make(map[int]string)
for k, b := range a.ignorePorts {
cpy[k] = b
}
cacheDuration := 1 * time.Second
if a.portCacheDuration > 0 {
cacheDuration = a.portCacheDuration
}
lp := &listeningPortsHandler{
ignorePorts: cpy,
cacheDuration: cacheDuration,
}
if a.devcontainers {
r.Mount("/api/v0/containers", a.containerAPI.Routes())
} else if manifest := a.manifest.Load(); manifest != nil && manifest.ParentID != uuid.Nil {
@@ -47,7 +57,7 @@ func (a *agent) apiHandler() http.Handler {
promHandler := PrometheusMetricsHandler(a.prometheusRegistry, a.logger)
r.Get("/api/v0/listening-ports", a.listeningPortsHandler.handler)
r.Get("/api/v0/listening-ports", lp.handler)
r.Get("/api/v0/netcheck", a.HandleNetcheck)
r.Post("/api/v0/list-directory", a.HandleLS)
r.Get("/api/v0/read-file", a.HandleReadFile)
@@ -62,21 +72,22 @@ func (a *agent) apiHandler() http.Handler {
return r
}
type ListeningPortsGetter interface {
GetListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error)
}
type listeningPortsHandler struct {
// In production code, this is set to an osListeningPortsGetter, but it can be overridden for
// testing.
getter ListeningPortsGetter
ignorePorts map[int]string
ignorePorts map[int]string
cacheDuration time.Duration
//nolint: unused // used on some but not all platforms
mut sync.Mutex
//nolint: unused // used on some but not all platforms
ports []codersdk.WorkspaceAgentListeningPort
//nolint: unused // used on some but not all platforms
mtime time.Time
}
// handler returns a list of listening ports. This is tested by coderd's
// TestWorkspaceAgentListeningPorts test.
func (lp *listeningPortsHandler) handler(rw http.ResponseWriter, r *http.Request) {
ports, err := lp.getter.GetListeningPorts()
ports, err := lp.getListeningPorts()
if err != nil {
httpapi.Write(r.Context(), rw, http.StatusInternalServerError, codersdk.Response{
Message: "Could not scan for listening ports.",
@@ -85,20 +96,7 @@ func (lp *listeningPortsHandler) handler(rw http.ResponseWriter, r *http.Request
return
}
filteredPorts := make([]codersdk.WorkspaceAgentListeningPort, 0, len(ports))
for _, port := range ports {
if port.Port < workspacesdk.AgentMinimumListeningPort {
continue
}
// Ignore ports that we've been told to ignore.
if _, ok := lp.ignorePorts[int(port.Port)]; ok {
continue
}
filteredPorts = append(filteredPorts, port)
}
httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.WorkspaceAgentListeningPortsResponse{
Ports: filteredPorts,
Ports: ports,
})
}
+1 -3
View File
@@ -250,9 +250,7 @@ func (a *agent) editFile(ctx context.Context, path string, edits []workspacesdk.
transforms[i] = replace.String(edit.Search, edit.Replace)
}
// Create an adjacent file to ensure it will be on the same device and can be
// moved atomically.
tmpfile, err := afero.TempFile(a.filesystem, filepath.Dir(path), filepath.Base(path))
tmpfile, err := afero.TempFile(a.filesystem, "", filepath.Base(path))
if err != nil {
return http.StatusInternalServerError, err
}
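The comment and call being changed here are about atomic replacement: os.Rename is only guaranteed to be atomic when the temporary file and the destination live on the same filesystem, and the system temp directory is often a separate mount. A minimal standard-library sketch of the adjacent write-then-rename pattern (illustrative, not the afero-based editFile itself):

package main

import (
	"os"
	"path/filepath"
)

// replaceAtomically writes data to a temp file created next to path and then
// renames it into place. Because the rename stays within one directory, it
// stays on one device and the replacement is atomic; writing the temp file to
// os.TempDir() could cross devices and make the rename fail.
func replaceAtomically(path string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup if the rename never happens

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}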
+8 -10
View File
@@ -3,23 +3,16 @@
package agent
import (
"sync"
"time"
"github.com/cakturk/go-netstat/netstat"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/workspacesdk"
)
type osListeningPortsGetter struct {
cacheDuration time.Duration
mut sync.Mutex
ports []codersdk.WorkspaceAgentListeningPort
mtime time.Time
}
func (lp *osListeningPortsGetter) GetListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
func (lp *listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
lp.mut.Lock()
defer lp.mut.Unlock()
@@ -40,7 +33,12 @@ func (lp *osListeningPortsGetter) GetListeningPorts() ([]codersdk.WorkspaceAgent
seen := make(map[uint16]struct{}, len(tabs))
ports := []codersdk.WorkspaceAgentListeningPort{}
for _, tab := range tabs {
if tab.LocalAddr == nil {
if tab.LocalAddr == nil || tab.LocalAddr.Port < workspacesdk.AgentMinimumListeningPort {
continue
}
// Ignore ports that we've been told to ignore.
if _, ok := lp.ignorePorts[int(tab.LocalAddr.Port)]; ok {
continue
}
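The mut, ports, mtime, and cacheDuration fields seen in the handler above implement a time-based cache around the netstat scan, though the cached path itself is not visible in this hunk. A rough sketch of how such a guard usually looks, with the port type and scan function reduced to stand-ins:

package main

import (
	"sync"
	"time"
)

// Port stands in for codersdk.WorkspaceAgentListeningPort and scan for the
// netstat-based enumeration; both are simplifications for a self-contained
// sketch.
type Port struct{ Number uint16 }

type cachedScanner struct {
	mut           sync.Mutex
	cacheDuration time.Duration
	ports         []Port
	mtime         time.Time
	scan          func() ([]Port, error)
}

// get returns the cached ports while they are fresh and rescans otherwise.
func (c *cachedScanner) get() ([]Port, error) {
	c.mut.Lock()
	defer c.mut.Unlock()
	if time.Since(c.mtime) < c.cacheDuration {
		return c.ports, nil
	}
	ports, err := c.scan()
	if err != nil {
		return nil, err
	}
	c.ports = ports
	c.mtime = time.Now()
	return ports, nil
}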
-45
View File
@@ -1,45 +0,0 @@
//go:build linux || (windows && amd64)
package agent
import (
"net"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestOSListeningPortsGetter(t *testing.T) {
t.Parallel()
uut := &osListeningPortsGetter{
cacheDuration: 1 * time.Hour,
}
l, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
defer l.Close()
ports, err := uut.GetListeningPorts()
require.NoError(t, err)
found := false
for _, port := range ports {
// #nosec G115 - Safe conversion as TCP port numbers are within uint16 range (0-65535)
if port.Port == uint16(l.Addr().(*net.TCPAddr).Port) {
found = true
break
}
}
require.True(t, found)
// check that we cache the ports
err = l.Close()
require.NoError(t, err)
portsNew, err := uut.GetListeningPorts()
require.NoError(t, err)
require.Equal(t, ports, portsNew)
// note that it's unsafe to try to assert that a port does not exist in the response
// because the OS may reallocate the port very quickly.
}
+2 -10
View File
@@ -2,17 +2,9 @@
package agent
import (
"time"
import "github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk"
)
type osListeningPortsGetter struct {
cacheDuration time.Duration
}
func (*osListeningPortsGetter) GetListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
func (*listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
// Can't scan for ports on non-linux or non-windows_amd64 systems at the
// moment. The UI will not show any "no ports found" message to the user, so
// the user won't suspect a thing.
+1 -1
View File
@@ -58,7 +58,7 @@ func (g *Graph[EdgeType, VertexType]) AddEdge(from, to VertexType, edge EdgeType
toID := g.getOrCreateVertexID(to)
if g.canReach(to, from) {
return xerrors.Errorf("adding edge (%v -> %v): %w", from, to, ErrCycleDetected)
return xerrors.Errorf("adding edge (%v -> %v) would create a cycle", from, to)
}
g.gonumGraph.SetEdge(simple.Edge{F: simple.Node(fromID), T: simple.Node(toID)})
+5 -3
View File
@@ -148,7 +148,8 @@ func TestGraph(t *testing.T) {
graph := &testGraph{}
unit1 := &testGraphVertex{Name: "unit1"}
err := graph.AddEdge(unit1, unit1, testEdgeCompleted)
require.ErrorIs(t, err, unit.ErrCycleDetected)
require.Error(t, err)
require.ErrorContains(t, err, fmt.Sprintf("adding edge (%v -> %v) would create a cycle", unit1, unit1))
return graph
},
@@ -159,7 +160,8 @@ func TestGraph(t *testing.T) {
err := graph.AddEdge(unit1, unit2, testEdgeCompleted)
require.NoError(t, err)
err = graph.AddEdge(unit2, unit1, testEdgeStarted)
require.ErrorIs(t, err, unit.ErrCycleDetected)
require.Error(t, err)
require.ErrorContains(t, err, fmt.Sprintf("adding edge (%v -> %v) would create a cycle", unit2, unit1))
return graph
},
@@ -339,7 +341,7 @@ func TestGraphThreadSafety(t *testing.T) {
// Verify all attempts correctly returned cycle error
for i, err := range cycleErrors {
require.Error(t, err, "goroutine %d should have detected cycle", i)
require.ErrorIs(t, err, unit.ErrCycleDetected)
require.Contains(t, err.Error(), "would create a cycle")
}
// Verify graph remains valid (original chain intact)
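The switch in these tests from require.ErrorIs to require.ErrorContains follows directly from the graph change above: once the sentinel is no longer wrapped with %w, errors.Is can no longer match it and only the message text remains. A small illustration with a hypothetical sentinel:

package main

import (
	"errors"
	"fmt"
)

// errCycle is a hypothetical sentinel standing in for unit.ErrCycleDetected.
var errCycle = errors.New("cycle detected")

func main() {
	// Wrapping with %w keeps the sentinel in the error chain, so errors.Is matches.
	wrapped := fmt.Errorf("adding edge (a -> b): %w", errCycle)
	fmt.Println(errors.Is(wrapped, errCycle)) // true

	// A plain formatted message drops the sentinel; callers are left with
	// substring checks such as require.ErrorContains.
	plain := fmt.Errorf("adding edge (a -> b) would create a cycle")
	fmt.Println(errors.Is(plain, errCycle)) // false
}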
-290
View File
@@ -1,290 +0,0 @@
package unit
import (
"errors"
"fmt"
"sync"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/util/slice"
)
var (
ErrUnitIDRequired = xerrors.New("unit name is required")
ErrUnitNotFound = xerrors.New("unit not found")
ErrUnitAlreadyRegistered = xerrors.New("unit already registered")
ErrCannotUpdateOtherUnit = xerrors.New("cannot update other unit's status")
ErrDependenciesNotSatisfied = xerrors.New("unit dependencies not satisfied")
ErrSameStatusAlreadySet = xerrors.New("same status already set")
ErrCycleDetected = xerrors.New("cycle detected")
ErrFailedToAddDependency = xerrors.New("failed to add dependency")
)
// Status represents the status of a unit.
type Status string
var _ fmt.Stringer = Status("")
func (s Status) String() string {
if s == StatusNotRegistered {
return "not registered"
}
return string(s)
}
// Status constants for dependency tracking.
const (
StatusNotRegistered Status = ""
StatusPending Status = "pending"
StatusStarted Status = "started"
StatusComplete Status = "completed"
)
// ID provides a type narrowed representation of the unique identifier of a unit.
type ID string
// Unit represents a point-in-time snapshot of a vertex in the dependency graph.
// Units may depend on other units, or be depended on by other units. The unit struct
// is not aware of updates made to the dependency graph after it is initialized and should
// not be cached.
type Unit struct {
id ID
status Status
// ready is true if all dependencies are satisfied.
// It does not have an accessor method on Unit, because a unit cannot know whether it is ready.
// Only the Manager can calculate whether a unit is ready based on knowledge of the dependency graph.
// To discourage use of an outdated readiness value, only the Manager should set and return this field.
ready bool
}
func (u Unit) ID() ID {
return u.id
}
func (u Unit) Status() Status {
return u.status
}
// Dependency represents a dependency relationship between units.
type Dependency struct {
Unit ID
DependsOn ID
RequiredStatus Status
CurrentStatus Status
IsSatisfied bool
}
// Manager provides reactive dependency tracking over a Graph.
// It manages Unit registration, dependency relationships, and status updates
// with automatic recalculation of readiness when dependencies are satisfied.
type Manager struct {
mu sync.RWMutex
// The underlying graph that stores dependency relationships
graph *Graph[Status, ID]
// Store vertex instances for each unit to ensure consistent references
units map[ID]Unit
}
// NewManager creates a new Manager instance.
func NewManager() *Manager {
return &Manager{
graph: &Graph[Status, ID]{},
units: make(map[ID]Unit),
}
}
// Register adds a unit to the manager if it is not already registered.
// If a Unit is already registered (per the ID field), it is not updated.
func (m *Manager) Register(id ID) error {
m.mu.Lock()
defer m.mu.Unlock()
if id == "" {
return xerrors.Errorf("registering unit %q: %w", id, ErrUnitIDRequired)
}
if m.registered(id) {
return xerrors.Errorf("registering unit %q: %w", id, ErrUnitAlreadyRegistered)
}
m.units[id] = Unit{
id: id,
status: StatusPending,
ready: true,
}
return nil
}
// registered checks if a unit is registered in the manager.
func (m *Manager) registered(id ID) bool {
return m.units[id].status != StatusNotRegistered
}
// Unit fetches a unit from the manager. If the unit does not exist,
// it returns the Unit zero-value as a placeholder unit, because
// units may depend on other units that have not yet been created.
func (m *Manager) Unit(id ID) (Unit, error) {
if id == "" {
return Unit{}, xerrors.Errorf("unit ID cannot be empty: %w", ErrUnitIDRequired)
}
m.mu.RLock()
defer m.mu.RUnlock()
return m.units[id], nil
}
func (m *Manager) IsReady(id ID) (bool, error) {
if id == "" {
return false, xerrors.Errorf("unit ID cannot be empty: %w", ErrUnitIDRequired)
}
m.mu.RLock()
defer m.mu.RUnlock()
if !m.registered(id) {
return true, nil
}
return m.units[id].ready, nil
}
// AddDependency adds a dependency relationship between units.
// The unit depends on the dependsOn unit reaching the requiredStatus.
func (m *Manager) AddDependency(unit ID, dependsOn ID, requiredStatus Status) error {
m.mu.Lock()
defer m.mu.Unlock()
switch {
case unit == "":
return xerrors.Errorf("dependent name cannot be empty: %w", ErrUnitIDRequired)
case dependsOn == "":
return xerrors.Errorf("dependency name cannot be empty: %w", ErrUnitIDRequired)
case !m.registered(unit):
return xerrors.Errorf("dependent unit %q must be registered first: %w", unit, ErrUnitNotFound)
}
// Add the dependency edge to the graph
// The edge goes from unit to dependsOn, representing the dependency
err := m.graph.AddEdge(unit, dependsOn, requiredStatus)
if err != nil {
return xerrors.Errorf("adding edge for unit %q: %w", unit, errors.Join(ErrFailedToAddDependency, err))
}
// Recalculate readiness for the unit since it now has a new dependency
m.recalculateReadinessUnsafe(unit)
return nil
}
// UpdateStatus updates a unit's status and recalculates readiness for affected dependents.
func (m *Manager) UpdateStatus(unit ID, newStatus Status) error {
m.mu.Lock()
defer m.mu.Unlock()
switch {
case unit == "":
return xerrors.Errorf("updating status for unit %q: %w", unit, ErrUnitIDRequired)
case !m.registered(unit):
return xerrors.Errorf("unit %q must be registered first: %w", unit, ErrUnitNotFound)
}
u := m.units[unit]
if u.status == newStatus {
return xerrors.Errorf("checking status for unit %q: %w", unit, ErrSameStatusAlreadySet)
}
u.status = newStatus
m.units[unit] = u
// Get all units that depend on this one (reverse adjacent vertices)
dependents := m.graph.GetReverseAdjacentVertices(unit)
// Recalculate readiness for all dependents
for _, dependent := range dependents {
m.recalculateReadinessUnsafe(dependent.From)
}
return nil
}
// recalculateReadinessUnsafe recalculates the readiness state for a unit.
// This method assumes the caller holds the write lock.
func (m *Manager) recalculateReadinessUnsafe(unit ID) {
u := m.units[unit]
dependencies := m.graph.GetForwardAdjacentVertices(unit)
allSatisfied := true
for _, dependency := range dependencies {
requiredStatus := dependency.Edge
dependsOnUnit := m.units[dependency.To]
if dependsOnUnit.status != requiredStatus {
allSatisfied = false
break
}
}
u.ready = allSatisfied
m.units[unit] = u
}
// GetGraph returns the underlying graph for visualization and debugging.
// This should be used carefully as it exposes the internal graph structure.
func (m *Manager) GetGraph() *Graph[Status, ID] {
return m.graph
}
// GetAllDependencies returns all dependencies for a unit, both satisfied and unsatisfied.
func (m *Manager) GetAllDependencies(unit ID) ([]Dependency, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if unit == "" {
return nil, xerrors.Errorf("unit ID cannot be empty: %w", ErrUnitIDRequired)
}
if !m.registered(unit) {
return nil, xerrors.Errorf("checking registration for unit %q: %w", unit, ErrUnitNotFound)
}
dependencies := m.graph.GetForwardAdjacentVertices(unit)
var allDependencies []Dependency
for _, dependency := range dependencies {
dependsOnUnit := m.units[dependency.To]
requiredStatus := dependency.Edge
allDependencies = append(allDependencies, Dependency{
Unit: unit,
DependsOn: dependency.To,
RequiredStatus: requiredStatus,
CurrentStatus: dependsOnUnit.status,
IsSatisfied: dependsOnUnit.status == requiredStatus,
})
}
return allDependencies, nil
}
// GetUnmetDependencies returns a list of unsatisfied dependencies for a unit.
func (m *Manager) GetUnmetDependencies(unit ID) ([]Dependency, error) {
allDependencies, err := m.GetAllDependencies(unit)
if err != nil {
return nil, err
}
var unmetDependencies []Dependency = slice.Filter(allDependencies, func(dependency Dependency) bool {
return !dependency.IsSatisfied
})
return unmetDependencies, nil
}
// ExportDOT exports the dependency graph to DOT format for visualization.
func (m *Manager) ExportDOT(name string) (string, error) {
return m.graph.ToDOT(name)
}
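For orientation, here is a minimal usage sketch of the Manager API defined in the (removed) file above, using the github.com/coder/coder/v2/agent/unit import path referenced by the tests below. It is illustrative only and reflects the API as it stood before this change:

package main

import (
	"fmt"

	"github.com/coder/coder/v2/agent/unit"
)

// A web unit that only becomes ready once the db unit reports StatusStarted.
func main() {
	m := unit.NewManager()
	_ = m.Register("db")
	_ = m.Register("web")

	// web depends on db reaching StatusStarted.
	_ = m.AddDependency("web", "db", unit.StatusStarted)

	ready, _ := m.IsReady("web")
	fmt.Println(ready) // false: db is still pending

	_ = m.UpdateStatus("db", unit.StatusStarted)
	ready, _ = m.IsReady("web")
	fmt.Println(ready) // true: the dependency is now satisfied
}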
-743
View File
@@ -1,743 +0,0 @@
package unit_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/agent/unit"
)
const (
unitA unit.ID = "serviceA"
unitB unit.ID = "serviceB"
unitC unit.ID = "serviceC"
unitD unit.ID = "serviceD"
)
func TestManager_UnitValidation(t *testing.T) {
t.Parallel()
t.Run("Empty Unit Name", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
err := manager.Register("")
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
err = manager.AddDependency("", unitA, unit.StatusStarted)
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
err = manager.AddDependency(unitA, "", unit.StatusStarted)
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
dependencies, err := manager.GetAllDependencies("")
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
require.Len(t, dependencies, 0)
unmetDependencies, err := manager.GetUnmetDependencies("")
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
require.Len(t, unmetDependencies, 0)
err = manager.UpdateStatus("", unit.StatusStarted)
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
isReady, err := manager.IsReady("")
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
require.False(t, isReady)
u, err := manager.Unit("")
require.ErrorIs(t, err, unit.ErrUnitIDRequired)
assert.Equal(t, unit.Unit{}, u)
})
}
func TestManager_Register(t *testing.T) {
t.Parallel()
t.Run("RegisterNewUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: a unit is registered
err := manager.Register(unitA)
require.NoError(t, err)
// Then: the unit should be ready (no dependencies)
u, err := manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unitA, u.ID())
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("RegisterDuplicateUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: a unit is registered
err := manager.Register(unitA)
require.NoError(t, err)
// Newly registered units have StatusPending. We update the unit status to StatusStarted,
// so we can later assert that it is not overwritten back to StatusPending by the second
// register call
manager.UpdateStatus(unitA, unit.StatusStarted)
// When: the unit is registered again
err = manager.Register(unitA)
// Then: a descriptive error should be returned
require.ErrorIs(t, err, unit.ErrUnitAlreadyRegistered)
// Then: the unit status should not be overwritten
u, err := manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusStarted, u.Status())
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("RegisterMultipleUnits", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: multiple units are registered
unitIDs := []unit.ID{unitA, unitB, unitC}
for _, unit := range unitIDs {
err := manager.Register(unit)
require.NoError(t, err)
}
// Then: all units should be ready initially
for _, unitID := range unitIDs {
u, err := manager.Unit(unitID)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err := manager.IsReady(unitID)
require.NoError(t, err)
assert.True(t, isReady)
}
})
}
func TestManager_AddDependency(t *testing.T) {
t.Parallel()
t.Run("AddDependencyBetweenRegisteredUnits", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: units A and B are registered
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Given: Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should not be ready (depends on B)
u, err := manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Then: Unit B should still be ready (no dependencies)
u, err = manager.Unit(unitB)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.True(t, isReady)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should be ready, because its dependency is now in the desired state.
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
// When: Unit B is stopped
err = manager.UpdateStatus(unitB, unit.StatusPending)
require.NoError(t, err)
// Then: Unit A should no longer be ready, because its dependency is not in the desired state.
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
})
t.Run("AddDependencyByAnUnregisteredDependentUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given Unit B is registered
err := manager.Register(unitB)
require.NoError(t, err)
// Given Unit A depends on Unit B being started
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
// Then: a descriptive error communicates that the dependency cannot be added
// because the dependent unit must be registered first.
require.ErrorIs(t, err, unit.ErrUnitNotFound)
})
t.Run("AddDependencyOnAnUnregisteredUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given unit A is registered
err := manager.Register(unitA)
require.NoError(t, err)
// Given Unit B is not yet registered
// And Unit A depends on Unit B being started
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: The dependency should be visible in Unit A's status
dependencies, err := manager.GetAllDependencies(unitA)
require.NoError(t, err)
require.Len(t, dependencies, 1)
assert.Equal(t, unitB, dependencies[0].DependsOn)
assert.Equal(t, unit.StatusStarted, dependencies[0].RequiredStatus)
assert.False(t, dependencies[0].IsSatisfied)
u, err := manager.Unit(unitB)
require.NoError(t, err)
assert.Equal(t, unit.StatusNotRegistered, u.Status())
// Then: Unit A should not be ready, because it depends on Unit B
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// When: Unit B is registered
err = manager.Register(unitB)
require.NoError(t, err)
// Then: Unit A should still not be ready.
// Unit B is now registered, but it has not been started as required by the dependency.
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should be ready, because its dependency is now in the desired state.
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("AddDependencyCreatesACyclicDependency", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register units
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
err = manager.Register(unitC)
require.NoError(t, err)
err = manager.Register(unitD)
require.NoError(t, err)
// A depends on B
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// B depends on C
err = manager.AddDependency(unitB, unitC, unit.StatusStarted)
require.NoError(t, err)
// C depends on D
err = manager.AddDependency(unitC, unitD, unit.StatusStarted)
require.NoError(t, err)
// Try to make D depend on A (creates indirect cycle)
err = manager.AddDependency(unitD, unitA, unit.StatusStarted)
require.ErrorIs(t, err, unit.ErrCycleDetected)
})
t.Run("UpdatingADependency", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given units A and B are registered
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Given Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// When: The dependency is updated to unit.StatusComplete
err = manager.AddDependency(unitA, unitB, unit.StatusComplete)
require.NoError(t, err)
// Then: Unit A should only have one dependency, and it should be unit.StatusComplete
dependencies, err := manager.GetAllDependencies(unitA)
require.NoError(t, err)
require.Len(t, dependencies, 1)
assert.Equal(t, unit.StatusComplete, dependencies[0].RequiredStatus)
})
}
func TestManager_UpdateStatus(t *testing.T) {
t.Parallel()
t.Run("UpdateStatusTriggersReadinessRecalculation", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given units A and B are registered
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Given Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should not be ready (depends on B)
u, err := manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should be ready, because its dependency is now in the desired state.
u, err = manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("UpdateStatusWithUnregisteredUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given Unit A is not registered
// When: Unit A is updated to unit.StatusStarted
err := manager.UpdateStatus(unitA, unit.StatusStarted)
// Then: a descriptive error communicates that the unit must be registered first.
require.ErrorIs(t, err, unit.ErrUnitNotFound)
})
t.Run("LinearChainDependencies", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given units A, B, and C are registered
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
err = manager.Register(unitC)
require.NoError(t, err)
// Create chain: A depends on B being "started", B depends on C being "completed"
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
err = manager.AddDependency(unitB, unitC, unit.StatusComplete)
require.NoError(t, err)
// Then: only Unit C should be ready (no dependencies)
u, err := manager.Unit(unitC)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err := manager.IsReady(unitC)
require.NoError(t, err)
assert.True(t, isReady)
u, err = manager.Unit(unitB)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.False(t, isReady)
u, err = manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// When: Unit C is completed
err = manager.UpdateStatus(unitC, unit.StatusComplete)
require.NoError(t, err)
// Then: Unit B should be ready, because its dependency is now in the desired state.
u, err = manager.Unit(unitB)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.True(t, isReady)
u, err = manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
u, err = manager.Unit(unitB)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.True(t, isReady)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should be ready, because its dependency is now in the desired state.
u, err = manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusPending, u.Status())
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
}
func TestManager_GetUnmetDependencies(t *testing.T) {
t.Parallel()
t.Run("GetUnmetDependenciesForUnitWithNoDependencies", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: Unit A is registered
err := manager.Register(unitA)
require.NoError(t, err)
// Given: Unit A has no dependencies
// Then: Unit A should have no unmet dependencies
unmet, err := manager.GetUnmetDependencies(unitA)
require.NoError(t, err)
assert.Empty(t, unmet)
})
t.Run("GetUnmetDependenciesForUnitWithUnsatisfiedDependencies", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Given: Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
unmet, err := manager.GetUnmetDependencies(unitA)
require.NoError(t, err)
require.Len(t, unmet, 1)
assert.Equal(t, unitA, unmet[0].Unit)
assert.Equal(t, unitB, unmet[0].DependsOn)
assert.Equal(t, unit.StatusStarted, unmet[0].RequiredStatus)
assert.False(t, unmet[0].IsSatisfied)
})
t.Run("GetUnmetDependenciesForUnitWithSatisfiedDependencies", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: Unit A and Unit B are registered
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Given: Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should have no unmet dependencies
unmet, err := manager.GetUnmetDependencies(unitA)
require.NoError(t, err)
assert.Empty(t, unmet)
})
t.Run("GetUnmetDependenciesForUnregisteredUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// When: Unit A is requested
unmet, err := manager.GetUnmetDependencies(unitA)
// Then: a descriptive error communicates that the unit must be registered first.
require.ErrorIs(t, err, unit.ErrUnitNotFound)
assert.Nil(t, unmet)
})
}
func TestManager_MultipleDependencies(t *testing.T) {
t.Parallel()
t.Run("UnitWithMultipleDependencies", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register all units
units := []unit.ID{unitA, unitB, unitC, unitD}
for _, unit := range units {
err := manager.Register(unit)
require.NoError(t, err)
}
// A depends on B being unit.StatusStarted AND C being "started"
err := manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
err = manager.AddDependency(unitA, unitC, unit.StatusStarted)
require.NoError(t, err)
// A should not be ready (depends on both B and C)
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Update B to unit.StatusStarted - A should still not be ready (needs C too)
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Update C to "started" - A should now be ready
err = manager.UpdateStatus(unitC, unit.StatusStarted)
require.NoError(t, err)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("ComplexDependencyChain", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register all units
units := []unit.ID{unitA, unitB, unitC, unitD}
for _, unit := range units {
err := manager.Register(unit)
require.NoError(t, err)
}
// Create complex dependency graph:
// A depends on B being unit.StatusStarted AND C being "started"
err := manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
err = manager.AddDependency(unitA, unitC, unit.StatusStarted)
require.NoError(t, err)
// B depends on D being "completed"
err = manager.AddDependency(unitB, unitD, unit.StatusComplete)
require.NoError(t, err)
// C depends on D being "completed"
err = manager.AddDependency(unitC, unitD, unit.StatusComplete)
require.NoError(t, err)
// Initially only D is ready
isReady, err := manager.IsReady(unitD)
require.NoError(t, err)
assert.True(t, isReady)
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.False(t, isReady)
isReady, err = manager.IsReady(unitC)
require.NoError(t, err)
assert.False(t, isReady)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Update D to "completed" - B and C should become ready
err = manager.UpdateStatus(unitD, unit.StatusComplete)
require.NoError(t, err)
isReady, err = manager.IsReady(unitB)
require.NoError(t, err)
assert.True(t, isReady)
isReady, err = manager.IsReady(unitC)
require.NoError(t, err)
assert.True(t, isReady)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Update B to unit.StatusStarted - A should still not be ready (needs C)
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// Update C to "started" - A should now be ready
err = manager.UpdateStatus(unitC, unit.StatusStarted)
require.NoError(t, err)
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
t.Run("DifferentStatusTypes", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register units
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
err = manager.Register(unitC)
require.NoError(t, err)
// Given: Unit A depends on Unit B being unit.StatusStarted
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
// Given: Unit A depends on Unit C being "completed"
err = manager.AddDependency(unitA, unitC, unit.StatusComplete)
require.NoError(t, err)
// When: Unit B is started
err = manager.UpdateStatus(unitB, unit.StatusStarted)
require.NoError(t, err)
// Then: Unit A should not be ready, because only one of its dependencies is in the desired state.
// It still requires Unit C to be completed.
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.False(t, isReady)
// When: Unit C is completed
err = manager.UpdateStatus(unitC, unit.StatusComplete)
require.NoError(t, err)
// Then: Unit A should be ready, because both of its dependencies are in the desired state.
isReady, err = manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
}
func TestManager_IsReady(t *testing.T) {
t.Parallel()
t.Run("IsReadyWithUnregisteredUnit", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Given: a unit is not registered
u, err := manager.Unit(unitA)
require.NoError(t, err)
assert.Equal(t, unit.StatusNotRegistered, u.Status())
// Then: the unit is reported as ready, since an unregistered unit has no unmet dependencies
isReady, err := manager.IsReady(unitA)
require.NoError(t, err)
assert.True(t, isReady)
})
}
func TestManager_ToDOT(t *testing.T) {
t.Parallel()
t.Run("ExportSimpleGraph", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register units
err := manager.Register(unitA)
require.NoError(t, err)
err = manager.Register(unitB)
require.NoError(t, err)
// Add dependency
err = manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
dot, err := manager.ExportDOT("test")
require.NoError(t, err)
assert.NotEmpty(t, dot)
assert.Contains(t, dot, "digraph")
})
t.Run("ExportComplexGraph", func(t *testing.T) {
t.Parallel()
manager := unit.NewManager()
// Register all units
units := []unit.ID{unitA, unitB, unitC, unitD}
for _, unit := range units {
err := manager.Register(unit)
require.NoError(t, err)
}
// Create complex dependency graph
// A depends on B and C, B depends on D, C depends on D
err := manager.AddDependency(unitA, unitB, unit.StatusStarted)
require.NoError(t, err)
err = manager.AddDependency(unitA, unitC, unit.StatusStarted)
require.NoError(t, err)
err = manager.AddDependency(unitB, unitD, unit.StatusComplete)
require.NoError(t, err)
err = manager.AddDependency(unitC, unitD, unit.StatusComplete)
require.NoError(t, err)
dot, err := manager.ExportDOT("complex")
require.NoError(t, err)
assert.NotEmpty(t, dot)
assert.Contains(t, dot, "digraph")
})
}
+17 -62
View File
@@ -11,7 +11,6 @@ import (
"os"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
"time"
@@ -57,8 +56,6 @@ func workspaceAgent() *serpent.Command {
devcontainers bool
devcontainerProjectDiscovery bool
devcontainerDiscoveryAutostart bool
socketServerEnabled bool
socketPath string
)
agentAuth := &AgentAuth{}
cmd := &serpent.Command{
@@ -204,15 +201,18 @@ func workspaceAgent() *serpent.Command {
// Enable pprof handler
// This prevents the pprof import from being accidentally deleted.
_ = pprof.Handler
if pprofAddress != "" {
pprofSrvClose := ServeHandler(ctx, logger, nil, pprofAddress, "pprof")
defer pprofSrvClose()
pprofSrvClose := ServeHandler(ctx, logger, nil, pprofAddress, "pprof")
defer pprofSrvClose()
if port, err := extractPort(pprofAddress); err == nil {
ignorePorts[port] = "pprof"
}
if port, err := extractPort(pprofAddress); err == nil {
ignorePorts[port] = "pprof"
}
} else {
logger.Debug(ctx, "pprof address is empty, disabling pprof server")
if port, err := extractPort(prometheusAddress); err == nil {
ignorePorts[port] = "prometheus"
}
if port, err := extractPort(debugAddress); err == nil {
ignorePorts[port] = "debug"
}
executablePath, err := os.Executable()
@@ -276,28 +276,6 @@ func workspaceAgent() *serpent.Command {
for {
prometheusRegistry := prometheus.NewRegistry()
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
var serverClose []func()
if prometheusAddress != "" {
prometheusSrvClose := ServeHandler(ctx, logger, promHandler, prometheusAddress, "prometheus")
serverClose = append(serverClose, prometheusSrvClose)
if port, err := extractPort(prometheusAddress); err == nil {
ignorePorts[port] = "prometheus"
}
} else {
logger.Debug(ctx, "prometheus address is empty, disabling prometheus server")
}
if debugAddress != "" {
// ServerHandle depends on `agnt.HTTPDebug()`, but `agnt`
// depends on `ignorePorts`. Keep this if statement in sync
// with below.
if port, err := extractPort(debugAddress); err == nil {
ignorePorts[port] = "debug"
}
}
agnt := agent.New(agent.Options{
Client: client,
Logger: logger,
@@ -319,19 +297,12 @@ func workspaceAgent() *serpent.Command {
agentcontainers.WithProjectDiscovery(devcontainerProjectDiscovery),
agentcontainers.WithDiscoveryAutostart(devcontainerDiscoveryAutostart),
},
SocketPath: socketPath,
SocketServerEnabled: socketServerEnabled,
})
if debugAddress != "" {
// ServerHandle depends on `agnt.HTTPDebug()`, but `agnt`
// depends on `ignorePorts`. Keep this if statement in sync
// with above.
debugSrvClose := ServeHandler(ctx, logger, agnt.HTTPDebug(), debugAddress, "debug")
serverClose = append(serverClose, debugSrvClose)
} else {
logger.Debug(ctx, "debug address is empty, disabling debug server")
}
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
prometheusSrvClose := ServeHandler(ctx, logger, promHandler, prometheusAddress, "prometheus")
debugSrvClose := ServeHandler(ctx, logger, agnt.HTTPDebug(), debugAddress, "debug")
select {
case <-ctx.Done():
@@ -343,11 +314,8 @@ func workspaceAgent() *serpent.Command {
}
lastErr = agnt.Close()
slices.Reverse(serverClose)
for _, closeFunc := range serverClose {
closeFunc()
}
debugSrvClose()
prometheusSrvClose()
if mustExit {
break
@@ -481,19 +449,6 @@ func workspaceAgent() *serpent.Command {
Description: "Allow the agent to autostart devcontainer projects it discovers based on their configuration.",
Value: serpent.BoolOf(&devcontainerDiscoveryAutostart),
},
{
Flag: "socket-server-enabled",
Default: "false",
Env: "CODER_AGENT_SOCKET_SERVER_ENABLED",
Description: "Enable the agent socket server.",
Value: serpent.BoolOf(&socketServerEnabled),
},
{
Flag: "socket-path",
Env: "CODER_AGENT_SOCKET_PATH",
Description: "Specify the path for the agent socket.",
Value: serpent.StringOf(&socketPath),
},
}
agentAuth.AttachOptions(cmd, false)
return cmd
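The extractPort calls above register the agent's own pprof, prometheus, and debug listeners in ignorePorts so they are filtered out of the listening-ports report. The helper itself is not part of this diff; assuming it simply parses a host:port listen address into its numeric port, a standalone equivalent might look like:

package main

import (
	"net"
	"strconv"

	"golang.org/x/xerrors"
)

// portFromAddress is a hypothetical stand-in for extractPort: it pulls the
// numeric port out of listen addresses such as "127.0.0.1:6060" or ":2112".
func portFromAddress(addr string) (int, error) {
	_, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return 0, xerrors.Errorf("split host/port %q: %w", addr, err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return 0, xerrors.Errorf("parse port %q: %w", portStr, err)
	}
	return port, nil
}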
-45
View File
@@ -178,51 +178,6 @@ func TestWorkspaceAgent(t *testing.T) {
require.Greater(t, atomic.LoadInt64(&called), int64(0), "expected coderd to be reached with custom headers")
require.Greater(t, atomic.LoadInt64(&derpCalled), int64(0), "expected /derp to be called with custom headers")
})
t.Run("DisabledServers", func(t *testing.T) {
t.Parallel()
client, db := coderdtest.NewWithDatabase(t, nil)
user := coderdtest.CreateFirstUser(t, client)
r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: user.OrganizationID,
OwnerID: user.UserID,
}).WithAgent().Do()
logDir := t.TempDir()
inv, _ := clitest.New(t,
"agent",
"--auth", "token",
"--agent-token", r.AgentToken,
"--agent-url", client.URL.String(),
"--log-dir", logDir,
"--pprof-address", "",
"--prometheus-address", "",
"--debug-address", "",
)
clitest.Start(t, inv)
// Verify the agent is connected and working.
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
MatchResources(matchAgentWithVersion).Wait()
require.Len(t, resources, 1)
require.Len(t, resources[0].Agents, 1)
require.NotEmpty(t, resources[0].Agents[0].Version)
// Verify the servers are not listening by checking the log for disabled
// messages.
require.Eventually(t, func() bool {
logContent, err := os.ReadFile(filepath.Join(logDir, "coder-agent.log"))
if err != nil {
return false
}
logStr := string(logContent)
return strings.Contains(logStr, "pprof address is empty, disabling pprof server") &&
strings.Contains(logStr, "prometheus address is empty, disabling prometheus server") &&
strings.Contains(logStr, "debug address is empty, disabling debug server")
}, testutil.WaitLong, testutil.IntervalMedium)
})
}
func matchAgentWithVersion(rs []codersdk.WorkspaceResource) bool {
+4 -20
View File
@@ -28,9 +28,7 @@ import (
)
// New creates a CLI instance with a configuration pointed to a
// temporary testing directory. The invocation is set up to use a
// global config directory for the given testing.TB, and keyring
// usage disabled.
// temporary testing directory.
func New(t testing.TB, args ...string) (*serpent.Invocation, config.Root) {
var root cli.RootCmd
@@ -61,15 +59,6 @@ func NewWithCommand(
t testing.TB, cmd *serpent.Command, args ...string,
) (*serpent.Invocation, config.Root) {
configDir := config.Root(t.TempDir())
// Keyring usage is disabled here when --global-config is set because many existing
// tests expect the session token to be stored on disk and is not properly instrumented
// for parallel testing against the actual operating system keyring.
invArgs := append([]string{"--global-config", string(configDir)}, args...)
return setupInvocation(t, cmd, invArgs...), configDir
}
func setupInvocation(t testing.TB, cmd *serpent.Command, args ...string,
) *serpent.Invocation {
// I really would like to fail test on error logs, but realistically, turning on by default
// in all our CLI tests is going to create a lot of flaky noise.
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).
@@ -77,21 +66,16 @@ func setupInvocation(t testing.TB, cmd *serpent.Command, args ...string,
Named("cli")
i := &serpent.Invocation{
Command: cmd,
Args: args,
Args: append([]string{"--global-config", string(configDir)}, args...),
Stdin: io.LimitReader(nil, 0),
Stdout: (&logWriter{prefix: "stdout", log: logger}),
Stderr: (&logWriter{prefix: "stderr", log: logger}),
Logger: logger,
}
t.Logf("invoking command: %s %s", cmd.Name(), strings.Join(i.Args, " "))
return i
}
func NewWithDefaultKeyringCommand(t testing.TB, cmd *serpent.Command, args ...string,
) (*serpent.Invocation, config.Root) {
configDir := config.Root(t.TempDir())
invArgs := append([]string{"--global-config", string(configDir)}, args...)
return setupInvocation(t, cmd, invArgs...), configDir
// These can be overridden by the test.
return i, configDir
}
// SetupConfig applies the URL and SessionToken of the client to the config.
+44 -48
View File
@@ -577,57 +577,53 @@ func prepWorkspaceBuild(inv *serpent.Invocation, client *codersdk.Client, args p
return nil, xerrors.Errorf("template version git auth: %w", err)
}
// Only perform dry-run for workspace creation and updates
// Skip for start and restart to avoid unnecessary delays
if args.Action == WorkspaceCreate || args.Action == WorkspaceUpdate {
// Run a dry-run with the given parameters to check correctness
dryRun, err := client.CreateTemplateVersionDryRun(inv.Context(), templateVersion.ID, codersdk.CreateTemplateVersionDryRunRequest{
WorkspaceName: args.NewWorkspaceName,
RichParameterValues: buildParameters,
})
if err != nil {
return nil, xerrors.Errorf("begin workspace dry-run: %w", err)
}
// Run a dry-run with the given parameters to check correctness
dryRun, err := client.CreateTemplateVersionDryRun(inv.Context(), templateVersion.ID, codersdk.CreateTemplateVersionDryRunRequest{
WorkspaceName: args.NewWorkspaceName,
RichParameterValues: buildParameters,
})
if err != nil {
return nil, xerrors.Errorf("begin workspace dry-run: %w", err)
}
matchedProvisioners, err := client.TemplateVersionDryRunMatchedProvisioners(inv.Context(), templateVersion.ID, dryRun.ID)
if err != nil {
return nil, xerrors.Errorf("get matched provisioners: %w", err)
}
cliutil.WarnMatchedProvisioners(inv.Stdout, &matchedProvisioners, dryRun)
_, _ = fmt.Fprintln(inv.Stdout, "Planning workspace...")
err = cliui.ProvisionerJob(inv.Context(), inv.Stdout, cliui.ProvisionerJobOptions{
Fetch: func() (codersdk.ProvisionerJob, error) {
return client.TemplateVersionDryRun(inv.Context(), templateVersion.ID, dryRun.ID)
},
Cancel: func() error {
return client.CancelTemplateVersionDryRun(inv.Context(), templateVersion.ID, dryRun.ID)
},
Logs: func() (<-chan codersdk.ProvisionerJobLog, io.Closer, error) {
return client.TemplateVersionDryRunLogsAfter(inv.Context(), templateVersion.ID, dryRun.ID, 0)
},
// Don't show log output for the dry-run unless there's an error.
Silent: true,
})
if err != nil {
// TODO (Dean): reprompt for parameter values if we deem it to
// be a validation error
return nil, xerrors.Errorf("dry-run workspace: %w", err)
}
matchedProvisioners, err := client.TemplateVersionDryRunMatchedProvisioners(inv.Context(), templateVersion.ID, dryRun.ID)
if err != nil {
return nil, xerrors.Errorf("get matched provisioners: %w", err)
}
cliutil.WarnMatchedProvisioners(inv.Stdout, &matchedProvisioners, dryRun)
_, _ = fmt.Fprintln(inv.Stdout, "Planning workspace...")
err = cliui.ProvisionerJob(inv.Context(), inv.Stdout, cliui.ProvisionerJobOptions{
Fetch: func() (codersdk.ProvisionerJob, error) {
return client.TemplateVersionDryRun(inv.Context(), templateVersion.ID, dryRun.ID)
},
Cancel: func() error {
return client.CancelTemplateVersionDryRun(inv.Context(), templateVersion.ID, dryRun.ID)
},
Logs: func() (<-chan codersdk.ProvisionerJobLog, io.Closer, error) {
return client.TemplateVersionDryRunLogsAfter(inv.Context(), templateVersion.ID, dryRun.ID, 0)
},
// Don't show log output for the dry-run unless there's an error.
Silent: true,
})
if err != nil {
// TODO (Dean): reprompt for parameter values if we deem it to
// be a validation error
return nil, xerrors.Errorf("dry-run workspace: %w", err)
}
resources, err := client.TemplateVersionDryRunResources(inv.Context(), templateVersion.ID, dryRun.ID)
if err != nil {
return nil, xerrors.Errorf("get workspace dry-run resources: %w", err)
}
resources, err := client.TemplateVersionDryRunResources(inv.Context(), templateVersion.ID, dryRun.ID)
if err != nil {
return nil, xerrors.Errorf("get workspace dry-run resources: %w", err)
}
err = cliui.WorkspaceResources(inv.Stdout, resources, cliui.WorkspaceResourcesOptions{
WorkspaceName: args.NewWorkspaceName,
// Since agents haven't connected yet, hiding this makes more sense.
HideAgentState: true,
Title: "Workspace Preview",
})
if err != nil {
return nil, xerrors.Errorf("get resources: %w", err)
}
err = cliui.WorkspaceResources(inv.Stdout, resources, cliui.WorkspaceResourcesOptions{
WorkspaceName: args.NewWorkspaceName,
// Since agents haven't connected yet, hiding this makes more sense.
HideAgentState: true,
Title: "Workspace Preview",
})
if err != nil {
return nil, xerrors.Errorf("get resources: %w", err)
}
return buildParameters, nil
+59 -88
View File
@@ -64,9 +64,7 @@ func (r *RootCmd) scaletestCmd() *serpent.Command {
r.scaletestWorkspaceTraffic(),
r.scaletestAutostart(),
r.scaletestNotifications(),
r.scaletestTaskStatus(),
r.scaletestSMTP(),
r.scaletestPrebuilds(),
},
}
@@ -386,88 +384,6 @@ func (s *scaletestPrometheusFlags) attach(opts *serpent.OptionSet) {
)
}
// workspaceTargetFlags holds common flags for targeting specific workspaces in scale tests.
type workspaceTargetFlags struct {
template string
targetWorkspaces string
useHostLogin bool
}
// attach adds the workspace target flags to the given options set.
func (f *workspaceTargetFlags) attach(opts *serpent.OptionSet) {
*opts = append(*opts,
serpent.Option{
Flag: "template",
FlagShorthand: "t",
Env: "CODER_SCALETEST_TEMPLATE",
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
Value: serpent.StringOf(&f.template),
},
serpent.Option{
Flag: "target-workspaces",
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
Value: serpent.StringOf(&f.targetWorkspaces),
},
serpent.Option{
Flag: "use-host-login",
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
Default: "false",
Description: "Connect as the currently logged in user.",
Value: serpent.BoolOf(&f.useHostLogin),
},
)
}
// getTargetedWorkspaces retrieves the workspaces based on the template filter and target range. warnWriter is where to
// write a warning message if any workspaces were skipped due to ownership mismatch.
func (f *workspaceTargetFlags) getTargetedWorkspaces(ctx context.Context, client *codersdk.Client, organizationIDs []uuid.UUID, warnWriter io.Writer) ([]codersdk.Workspace, error) {
// Validate template if provided
if f.template != "" {
_, err := parseTemplate(ctx, client, organizationIDs, f.template)
if err != nil {
return nil, xerrors.Errorf("parse template: %w", err)
}
}
// Parse target range
targetStart, targetEnd, err := parseTargetRange("workspaces", f.targetWorkspaces)
if err != nil {
return nil, xerrors.Errorf("parse target workspaces: %w", err)
}
// Determine owner based on useHostLogin
var owner string
if f.useHostLogin {
owner = codersdk.Me
}
// Get workspaces
workspaces, numSkipped, err := getScaletestWorkspaces(ctx, client, owner, f.template)
if err != nil {
return nil, err
}
if numSkipped > 0 {
cliui.Warnf(warnWriter, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
}
// Adjust targetEnd if not specified
if targetEnd == 0 {
targetEnd = len(workspaces)
}
// Validate range
if len(workspaces) == 0 {
return nil, xerrors.Errorf("no scaletest workspaces exist")
}
if targetEnd > len(workspaces) {
return nil, xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetEnd, len(workspaces))
}
// Return the sliced workspaces
return workspaces[targetStart:targetEnd], nil
}
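// Minimal usage sketch, not part of this diff: how a hypothetical scaletest
// subcommand could wire the workspaceTargetFlags helper above. The subcommand
// name, Short text, and everything in the handler other than the helper calls
// are assumptions for illustration.
func (r *RootCmd) scaletestTargetingExample() *serpent.Command {
	targetFlags := &workspaceTargetFlags{}
	cmd := &serpent.Command{
		Use:   "targeting-example",
		Short: "Illustrates the shared --template/--target-workspaces/--use-host-login flags.",
		Handler: func(inv *serpent.Invocation) error {
			ctx := inv.Context()
			client, err := r.InitClient(inv)
			if err != nil {
				return err
			}
			me, err := requireAdmin(ctx, client)
			if err != nil {
				return err
			}
			// Resolve the targeted workspaces; skipped-workspace warnings go to stdout.
			workspaces, err := targetFlags.getTargetedWorkspaces(ctx, client, me.OrganizationIDs, inv.Stdout)
			if err != nil {
				return err
			}
			_, _ = fmt.Fprintf(inv.Stdout, "targeting %d workspace(s)\n", len(workspaces))
			return nil
		},
	}
	// Adds --template, --target-workspaces, and --use-host-login to the command.
	targetFlags.attach(&cmd.Options)
	return cmd
}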
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
me, err := client.User(ctx, codersdk.Me)
if err != nil {
@@ -1277,10 +1193,12 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
bytesPerTick int64
ssh bool
disableDirect bool
useHostLogin bool
app string
template string
targetWorkspaces string
workspaceProxyURL string
targetFlags = &workspaceTargetFlags{}
tracingFlags = &scaletestTracingFlags{}
strategy = &scaletestStrategyFlags{}
cleanupStrategy = newScaletestCleanupStrategy()
@@ -1325,9 +1243,15 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
},
}
workspaces, err := targetFlags.getTargetedWorkspaces(ctx, client, me.OrganizationIDs, inv.Stdout)
if template != "" {
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
if err != nil {
return xerrors.Errorf("parse template: %w", err)
}
}
targetWorkspaceStart, targetWorkspaceEnd, err := parseTargetRange("workspaces", targetWorkspaces)
if err != nil {
return err
return xerrors.Errorf("parse target workspaces: %w", err)
}
appHost, err := client.AppHost(ctx)
@@ -1335,6 +1259,30 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
return xerrors.Errorf("get app host: %w", err)
}
var owner string
if useHostLogin {
owner = codersdk.Me
}
workspaces, numSkipped, err := getScaletestWorkspaces(inv.Context(), client, owner, template)
if err != nil {
return err
}
if numSkipped > 0 {
cliui.Warnf(inv.Stdout, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
}
if targetWorkspaceEnd == 0 {
targetWorkspaceEnd = len(workspaces)
}
if len(workspaces) == 0 {
return xerrors.Errorf("no scaletest workspaces exist")
}
if targetWorkspaceEnd > len(workspaces) {
return xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetWorkspaceEnd, len(workspaces))
}
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
if err != nil {
return xerrors.Errorf("create tracer provider: %w", err)
@@ -1359,6 +1307,10 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
for idx, ws := range workspaces {
if idx < targetWorkspaceStart || idx >= targetWorkspaceEnd {
continue
}
var (
agent codersdk.WorkspaceAgent
name = "workspace-traffic"
@@ -1463,6 +1415,19 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
}
cmd.Options = []serpent.Option{
{
Flag: "template",
FlagShorthand: "t",
Env: "CODER_SCALETEST_TEMPLATE",
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
Value: serpent.StringOf(&template),
},
{
Flag: "target-workspaces",
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
Value: serpent.StringOf(&targetWorkspaces),
},
{
Flag: "bytes-per-tick",
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
@@ -1498,6 +1463,13 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
Description: "Send WebSocket traffic to a workspace app (proxied via coderd), cannot be used with --ssh.",
Value: serpent.StringOf(&app),
},
{
Flag: "use-host-login",
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
Default: "false",
Description: "Connect as the currently logged in user.",
Value: serpent.BoolOf(&useHostLogin),
},
{
Flag: "workspace-proxy-url",
Env: "CODER_SCALETEST_WORKSPACE_PROXY_URL",
@@ -1507,7 +1479,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
},
}
targetFlags.attach(&cmd.Options)
tracingFlags.attach(&cmd.Options)
strategy.attach(&cmd.Options)
cleanupStrategy.attach(&cmd.Options)
+53 -86
View File
@@ -3,7 +3,6 @@
package cli
import (
"bytes"
"context"
"fmt"
"net/http"
@@ -30,13 +29,12 @@ import (
func (r *RootCmd) scaletestNotifications() *serpent.Command {
var (
userCount int64
templateAdminPercentage float64
notificationTimeout time.Duration
smtpRequestTimeout time.Duration
dialTimeout time.Duration
noCleanup bool
smtpAPIURL string
userCount int64
ownerUserPercentage float64
notificationTimeout time.Duration
dialTimeout time.Duration
noCleanup bool
smtpAPIURL string
tracingFlags = &scaletestTracingFlags{}
@@ -79,24 +77,24 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
return xerrors.Errorf("--user-count must be greater than 0")
}
if templateAdminPercentage < 0 || templateAdminPercentage > 100 {
return xerrors.Errorf("--template-admin-percentage must be between 0 and 100")
if ownerUserPercentage < 0 || ownerUserPercentage > 100 {
return xerrors.Errorf("--owner-user-percentage must be between 0 and 100")
}
if smtpAPIURL != "" && !strings.HasPrefix(smtpAPIURL, "http://") && !strings.HasPrefix(smtpAPIURL, "https://") {
return xerrors.Errorf("--smtp-api-url must start with http:// or https://")
}
templateAdminCount := int64(float64(userCount) * templateAdminPercentage / 100)
if templateAdminCount == 0 && templateAdminPercentage > 0 {
templateAdminCount = 1
ownerUserCount := int64(float64(userCount) * ownerUserPercentage / 100)
if ownerUserCount == 0 && ownerUserPercentage > 0 {
ownerUserCount = 1
}
regularUserCount := userCount - templateAdminCount
regularUserCount := userCount - ownerUserCount
_, _ = fmt.Fprintf(inv.Stderr, "Distribution plan:\n")
_, _ = fmt.Fprintf(inv.Stderr, " Total users: %d\n", userCount)
_, _ = fmt.Fprintf(inv.Stderr, " Template admins: %d (%.1f%%)\n", templateAdminCount, templateAdminPercentage)
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-templateAdminPercentage)
_, _ = fmt.Fprintf(inv.Stderr, " Owner users: %d (%.1f%%)\n", ownerUserCount, ownerUserPercentage)
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-ownerUserPercentage)
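// Worked example, not part of this diff: with --user-count=10 and
// --owner-user-percentage=20, ownerUserCount = int64(10*20/100) = 2 and
// regularUserCount = 8. With --owner-user-percentage=5 the truncated result
// would be 0, so the bump above raises it to 1, leaving 9 regular users.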
outputs, err := output.parse()
if err != nil {
@@ -129,12 +127,13 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
_, _ = fmt.Fprintln(inv.Stderr, "Creating users...")
dialBarrier := &sync.WaitGroup{}
templateAdminWatchBarrier := &sync.WaitGroup{}
ownerWatchBarrier := &sync.WaitGroup{}
dialBarrier.Add(int(userCount))
templateAdminWatchBarrier.Add(int(templateAdminCount))
ownerWatchBarrier.Add(int(ownerUserCount))
expectedNotificationIDs := map[uuid.UUID]struct{}{
notificationsLib.TemplateTemplateDeleted: {},
notificationsLib.TemplateUserAccountCreated: {},
notificationsLib.TemplateUserAccountDeleted: {},
}
triggerTimes := make(map[uuid.UUID]chan time.Time, len(expectedNotificationIDs))
@@ -142,31 +141,20 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
triggerTimes[id] = make(chan time.Time, 1)
}
smtpHTTPTransport := &http.Transport{
MaxConnsPerHost: 512,
MaxIdleConnsPerHost: 512,
IdleConnTimeout: 60 * time.Second,
}
smtpHTTPClient := &http.Client{
Transport: smtpHTTPTransport,
}
configs := make([]notifications.Config, 0, userCount)
for range templateAdminCount {
for range ownerUserCount {
config := notifications.Config{
User: createusers.Config{
OrganizationID: me.OrganizationIDs[0],
},
Roles: []string{codersdk.RoleTemplateAdmin},
Roles: []string{codersdk.RoleOwner},
NotificationTimeout: notificationTimeout,
DialTimeout: dialTimeout,
DialBarrier: dialBarrier,
ReceivingWatchBarrier: templateAdminWatchBarrier,
ReceivingWatchBarrier: ownerWatchBarrier,
ExpectedNotificationsIDs: expectedNotificationIDs,
Metrics: metrics,
SMTPApiURL: smtpAPIURL,
SMTPRequestTimeout: smtpRequestTimeout,
SMTPHttpClient: smtpHTTPClient,
}
if err := config.Validate(); err != nil {
return xerrors.Errorf("validate config: %w", err)
@@ -182,8 +170,9 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
NotificationTimeout: notificationTimeout,
DialTimeout: dialTimeout,
DialBarrier: dialBarrier,
ReceivingWatchBarrier: templateAdminWatchBarrier,
ReceivingWatchBarrier: ownerWatchBarrier,
Metrics: metrics,
SMTPApiURL: smtpAPIURL,
}
if err := config.Validate(); err != nil {
return xerrors.Errorf("validate config: %w", err)
@@ -191,7 +180,7 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
configs = append(configs, config)
}
go triggerNotifications(
go triggerUserNotifications(
ctx,
logger,
client,
@@ -272,30 +261,23 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
Required: true,
},
{
Flag: "template-admin-percentage",
Env: "CODER_SCALETEST_NOTIFICATION_TEMPLATE_ADMIN_PERCENTAGE",
Flag: "owner-user-percentage",
Env: "CODER_SCALETEST_NOTIFICATION_OWNER_USER_PERCENTAGE",
Default: "20.0",
Description: "Percentage of users to assign Template Admin role to (0-100).",
Value: serpent.Float64Of(&templateAdminPercentage),
Description: "Percentage of users to assign Owner role to (0-100).",
Value: serpent.Float64Of(&ownerUserPercentage),
},
{
Flag: "notification-timeout",
Env: "CODER_SCALETEST_NOTIFICATION_TIMEOUT",
Default: "10m",
Default: "5m",
Description: "How long to wait for notifications after triggering.",
Value: serpent.DurationOf(&notificationTimeout),
},
{
Flag: "smtp-request-timeout",
Env: "CODER_SCALETEST_SMTP_REQUEST_TIMEOUT",
Default: "5m",
Description: "Timeout for SMTP requests.",
Value: serpent.DurationOf(&smtpRequestTimeout),
},
{
Flag: "dial-timeout",
Env: "CODER_SCALETEST_DIAL_TIMEOUT",
Default: "10m",
Default: "2m",
Description: "Timeout for dialing the notification websocket endpoint.",
Value: serpent.DurationOf(&dialTimeout),
},
@@ -397,9 +379,9 @@ func computeNotificationLatencies(
return nil
}
// triggerNotifications waits for all test users to connect,
// then creates and deletes a test template to trigger notification events for testing.
func triggerNotifications(
// triggerUserNotifications waits for all test users to connect,
// then creates and deletes a test user to trigger notification events for testing.
func triggerUserNotifications(
ctx context.Context,
logger slog.Logger,
client *codersdk.Client,
@@ -432,49 +414,34 @@ func triggerNotifications(
return
}
logger.Info(ctx, "creating test template to test notifications")
const (
triggerUsername = "scaletest-trigger-user"
triggerEmail = "scaletest-trigger@example.com"
)
// Upload empty template file.
file, err := client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader([]byte{}))
if err != nil {
logger.Error(ctx, "upload test template", slog.Error(err))
return
}
logger.Info(ctx, "test template uploaded", slog.F("file_id", file.ID))
logger.Info(ctx, "creating test user to test notifications",
slog.F("username", triggerUsername),
slog.F("email", triggerEmail),
slog.F("org_id", orgID))
// Create template version.
version, err := client.CreateTemplateVersion(ctx, orgID, codersdk.CreateTemplateVersionRequest{
StorageMethod: codersdk.ProvisionerStorageMethodFile,
FileID: file.ID,
Provisioner: codersdk.ProvisionerTypeEcho,
testUser, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
OrganizationIDs: []uuid.UUID{orgID},
Username: triggerUsername,
Email: triggerEmail,
Password: "test-password-123",
})
if err != nil {
logger.Error(ctx, "create test template version", slog.Error(err))
logger.Error(ctx, "create test user", slog.Error(err))
return
}
logger.Info(ctx, "test template version created", slog.F("template_version_id", version.ID))
expectedNotifications[notificationsLib.TemplateUserAccountCreated] <- time.Now()
// Create template.
testTemplate, err := client.CreateTemplate(ctx, orgID, codersdk.CreateTemplateRequest{
Name: "scaletest-test-template",
Description: "scaletest-test-template",
VersionID: version.ID,
})
err = client.DeleteUser(ctx, testUser.ID)
if err != nil {
logger.Error(ctx, "create test template", slog.Error(err))
logger.Error(ctx, "delete test user", slog.Error(err))
return
}
logger.Info(ctx, "test template created", slog.F("template_id", testTemplate.ID))
// Delete template to trigger notification.
err = client.DeleteTemplate(ctx, testTemplate.ID)
if err != nil {
logger.Error(ctx, "delete test template", slog.Error(err))
return
}
logger.Info(ctx, "test template deleted", slog.F("template_id", testTemplate.ID))
// Record expected notification.
expectedNotifications[notificationsLib.TemplateTemplateDeleted] <- time.Now()
close(expectedNotifications[notificationsLib.TemplateTemplateDeleted])
expectedNotifications[notificationsLib.TemplateUserAccountDeleted] <- time.Now()
close(expectedNotifications[notificationsLib.TemplateUserAccountCreated])
close(expectedNotifications[notificationsLib.TemplateUserAccountDeleted])
}
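// Note on the channel handshake above (illustrative, not part of this diff):
// each expected notification template ID gets a time channel of capacity 1.
// This goroutine records the trigger time for the "user account created" and
// "user account deleted" events and then closes both channels, presumably so
// that computeNotificationLatencies (referenced earlier in this file) can
// compare the trigger times against when each runner observed the notification.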
-297
View File
@@ -1,297 +0,0 @@
//go:build !slim
package cli
import (
"fmt"
"net/http"
"os/signal"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/scaletest/harness"
"github.com/coder/coder/v2/scaletest/prebuilds"
"github.com/coder/quartz"
"github.com/coder/serpent"
)
func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
var (
numTemplates int64
numPresets int64
numPresetPrebuilds int64
templateVersionJobTimeout time.Duration
prebuildWorkspaceTimeout time.Duration
noCleanup bool
tracingFlags = &scaletestTracingFlags{}
timeoutStrategy = &timeoutFlags{}
cleanupStrategy = newScaletestCleanupStrategy()
output = &scaletestOutputFlags{}
prometheusFlags = &scaletestPrometheusFlags{}
)
cmd := &serpent.Command{
Use: "prebuilds",
Short: "Creates prebuild workspaces on the Coder server.",
Handler: func(inv *serpent.Invocation) error {
ctx := inv.Context()
client, err := r.InitClient(inv)
if err != nil {
return err
}
notifyCtx, stop := signal.NotifyContext(ctx, StopSignals...)
defer stop()
ctx = notifyCtx
me, err := requireAdmin(ctx, client)
if err != nil {
return err
}
client.HTTPClient = &http.Client{
Transport: &codersdk.HeaderTransport{
Transport: http.DefaultTransport,
Header: map[string][]string{
codersdk.BypassRatelimitHeader: {"true"},
},
},
}
if numTemplates <= 0 {
return xerrors.Errorf("--num-templates must be greater than 0")
}
if numPresets <= 0 {
return xerrors.Errorf("--num-presets must be greater than 0")
}
if numPresetPrebuilds <= 0 {
return xerrors.Errorf("--num-preset-prebuilds must be greater than 0")
}
outputs, err := output.parse()
if err != nil {
return xerrors.Errorf("parse output flags: %w", err)
}
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
if err != nil {
return xerrors.Errorf("create tracer provider: %w", err)
}
defer func() {
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
if err := closeTracing(ctx); err != nil {
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
}
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
<-time.After(prometheusFlags.Wait)
}()
tracer := tracerProvider.Tracer(scaletestTracerName)
reg := prometheus.NewRegistry()
metrics := prebuilds.NewMetrics(reg)
logger := inv.Logger
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
defer prometheusSrvClose()
err = client.PutPrebuildsSettings(ctx, codersdk.PrebuildsSettings{
ReconciliationPaused: true,
})
if err != nil {
return xerrors.Errorf("pause prebuilds: %w", err)
}
setupBarrier := new(sync.WaitGroup)
setupBarrier.Add(int(numTemplates))
creationBarrier := new(sync.WaitGroup)
creationBarrier.Add(int(numTemplates))
deletionSetupBarrier := new(sync.WaitGroup)
deletionSetupBarrier.Add(1)
deletionBarrier := new(sync.WaitGroup)
deletionBarrier.Add(int(numTemplates))
th := harness.NewTestHarness(timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}), cleanupStrategy.toStrategy())
for i := range numTemplates {
id := strconv.Itoa(int(i))
cfg := prebuilds.Config{
OrganizationID: me.OrganizationIDs[0],
NumPresets: int(numPresets),
NumPresetPrebuilds: int(numPresetPrebuilds),
TemplateVersionJobTimeout: templateVersionJobTimeout,
PrebuildWorkspaceTimeout: prebuildWorkspaceTimeout,
Metrics: metrics,
SetupBarrier: setupBarrier,
CreationBarrier: creationBarrier,
DeletionSetupBarrier: deletionSetupBarrier,
DeletionBarrier: deletionBarrier,
Clock: quartz.NewReal(),
}
err := cfg.Validate()
if err != nil {
return xerrors.Errorf("validate config: %w", err)
}
var runner harness.Runnable = prebuilds.NewRunner(client, cfg)
if tracingEnabled {
runner = &runnableTraceWrapper{
tracer: tracer,
spanName: fmt.Sprintf("prebuilds/%s", id),
runner: runner,
}
}
th.AddRun("prebuilds", id, runner)
}
_, _ = fmt.Fprintf(inv.Stderr, "Creating %d templates with %d presets and %d prebuilds per preset...\n",
numTemplates, numPresets, numPresetPrebuilds)
_, _ = fmt.Fprintf(inv.Stderr, "Total expected prebuilds: %d\n", numTemplates*numPresets*numPresetPrebuilds)
testCtx, testCancel := timeoutStrategy.toContext(ctx)
defer testCancel()
runErrCh := make(chan error, 1)
go func() {
runErrCh <- th.Run(testCtx)
}()
_, _ = fmt.Fprintln(inv.Stderr, "Waiting for all templates to be created...")
setupBarrier.Wait()
_, _ = fmt.Fprintln(inv.Stderr, "All templates created")
err = client.PutPrebuildsSettings(ctx, codersdk.PrebuildsSettings{
ReconciliationPaused: false,
})
if err != nil {
return xerrors.Errorf("resume prebuilds: %w", err)
}
_, _ = fmt.Fprintln(inv.Stderr, "Waiting for all prebuilds to be created...")
creationBarrier.Wait()
_, _ = fmt.Fprintln(inv.Stderr, "All prebuilds created")
err = client.PutPrebuildsSettings(ctx, codersdk.PrebuildsSettings{
ReconciliationPaused: true,
})
if err != nil {
return xerrors.Errorf("pause prebuilds before deletion: %w", err)
}
_, _ = fmt.Fprintln(inv.Stderr, "Prebuilds paused, signaling runners to prepare for deletion")
deletionSetupBarrier.Done()
_, _ = fmt.Fprintln(inv.Stderr, "Waiting for all templates to be updated with 0 prebuilds...")
deletionBarrier.Wait()
_, _ = fmt.Fprintln(inv.Stderr, "All templates updated")
err = client.PutPrebuildsSettings(ctx, codersdk.PrebuildsSettings{
ReconciliationPaused: false,
})
if err != nil {
return xerrors.Errorf("resume prebuilds for deletion: %w", err)
}
_, _ = fmt.Fprintln(inv.Stderr, "Waiting for all prebuilds to be deleted...")
err = <-runErrCh
if err != nil {
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
}
// If the command was interrupted, skip cleanup & stats
if notifyCtx.Err() != nil {
return notifyCtx.Err()
}
res := th.Results()
for _, o := range outputs {
err = o.write(res, inv.Stdout)
if err != nil {
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
}
}
if !noCleanup {
_, _ = fmt.Fprintln(inv.Stderr, "\nStarting cleanup (deleting templates)...")
cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
defer cleanupCancel()
err = th.Cleanup(cleanupCtx)
if err != nil {
return xerrors.Errorf("cleanup tests: %w", err)
}
// If the cleanup was interrupted, skip stats
if notifyCtx.Err() != nil {
return notifyCtx.Err()
}
}
if res.TotalFail > 0 {
return xerrors.New("prebuild creation test failed, see above for more details")
}
return nil
},
}
cmd.Options = serpent.OptionSet{
{
Flag: "num-templates",
Env: "CODER_SCALETEST_PREBUILDS_NUM_TEMPLATES",
Default: "1",
Description: "Number of templates to create for the test.",
Value: serpent.Int64Of(&numTemplates),
},
{
Flag: "num-presets",
Env: "CODER_SCALETEST_PREBUILDS_NUM_PRESETS",
Default: "1",
Description: "Number of presets per template.",
Value: serpent.Int64Of(&numPresets),
},
{
Flag: "num-preset-prebuilds",
Env: "CODER_SCALETEST_PREBUILDS_NUM_PRESET_PREBUILDS",
Default: "1",
Description: "Number of prebuilds per preset.",
Value: serpent.Int64Of(&numPresetPrebuilds),
},
{
Flag: "template-version-job-timeout",
Env: "CODER_SCALETEST_PREBUILDS_TEMPLATE_VERSION_JOB_TIMEOUT",
Default: "5m",
Description: "Timeout for template version provisioning jobs.",
Value: serpent.DurationOf(&templateVersionJobTimeout),
},
{
Flag: "prebuild-workspace-timeout",
Env: "CODER_SCALETEST_PREBUILDS_WORKSPACE_TIMEOUT",
Default: "10m",
Description: "Timeout for all prebuild workspaces to be created/deleted.",
Value: serpent.DurationOf(&prebuildWorkspaceTimeout),
},
{
Flag: "skip-cleanup",
Env: "CODER_SCALETEST_PREBUILDS_SKIP_CLEANUP",
Description: "Skip cleanup (deletion test) and leave resources intact.",
Value: serpent.BoolOf(&noCleanup),
},
}
tracingFlags.attach(&cmd.Options)
timeoutStrategy.attach(&cmd.Options)
cleanupStrategy.attach(&cmd.Options)
output.attach(&cmd.Options)
prometheusFlags.attach(&cmd.Options)
return cmd
}
-275
View File
@@ -1,275 +0,0 @@
//go:build !slim
package cli
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/xerrors"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman"
"github.com/coder/serpent"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/scaletest/harness"
"github.com/coder/coder/v2/scaletest/taskstatus"
)
const (
taskStatusTestName = "task-status"
)
func (r *RootCmd) scaletestTaskStatus() *serpent.Command {
var (
count int64
template string
workspaceNamePrefix string
appSlug string
reportStatusPeriod time.Duration
reportStatusDuration time.Duration
baselineDuration time.Duration
tracingFlags = &scaletestTracingFlags{}
prometheusFlags = &scaletestPrometheusFlags{}
timeoutStrategy = &timeoutFlags{}
cleanupStrategy = newScaletestCleanupStrategy()
output = &scaletestOutputFlags{}
)
orgContext := NewOrganizationContext()
cmd := &serpent.Command{
Use: "task-status",
Short: "Generates load on the Coder server by simulating task status reporting",
Long: `This test creates external workspaces and simulates AI agents reporting task status.
After all runners connect, it waits for the baseline duration before triggering status reporting.`,
Handler: func(inv *serpent.Invocation) error {
ctx := inv.Context()
outputs, err := output.parse()
if err != nil {
return xerrors.Errorf("could not parse --output flags: %w", err)
}
client, err := r.InitClient(inv)
if err != nil {
return err
}
org, err := orgContext.Selected(inv, client)
if err != nil {
return err
}
_, err = requireAdmin(ctx, client)
if err != nil {
return err
}
// Disable rate limits for this test
client.HTTPClient = &http.Client{
Transport: &codersdk.HeaderTransport{
Transport: http.DefaultTransport,
Header: map[string][]string{
codersdk.BypassRatelimitHeader: {"true"},
},
},
}
// Find the template
tpl, err := parseTemplate(ctx, client, []uuid.UUID{org.ID}, template)
if err != nil {
return xerrors.Errorf("parse template %q: %w", template, err)
}
templateID := tpl.ID
reg := prometheus.NewRegistry()
metrics := taskstatus.NewMetrics(reg)
logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
defer prometheusSrvClose()
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
if err != nil {
return xerrors.Errorf("create tracer provider: %w", err)
}
defer func() {
// Allow time for traces to flush even if command context is
// canceled. This is a no-op if tracing is not enabled.
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
if err := closeTracing(ctx); err != nil {
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
}
// Wait for prometheus metrics to be scraped
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
<-time.After(prometheusFlags.Wait)
}()
tracer := tracerProvider.Tracer(scaletestTracerName)
// Setup shared resources for coordination
connectedWaitGroup := &sync.WaitGroup{}
connectedWaitGroup.Add(int(count))
startReporting := make(chan struct{})
// Create the test harness
th := harness.NewTestHarness(
timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}),
cleanupStrategy.toStrategy(),
)
// Create runners
for i := range count {
workspaceName := fmt.Sprintf("%s-%d", workspaceNamePrefix, i)
cfg := taskstatus.Config{
TemplateID: templateID,
WorkspaceName: workspaceName,
AppSlug: appSlug,
ConnectedWaitGroup: connectedWaitGroup,
StartReporting: startReporting,
ReportStatusPeriod: reportStatusPeriod,
ReportStatusDuration: reportStatusDuration,
Metrics: metrics,
MetricLabelValues: []string{},
}
if err := cfg.Validate(); err != nil {
return xerrors.Errorf("validate config for runner %d: %w", i, err)
}
var runner harness.Runnable = taskstatus.NewRunner(client, cfg)
if tracingEnabled {
runner = &runnableTraceWrapper{
tracer: tracer,
spanName: fmt.Sprintf("%s/%d", taskStatusTestName, i),
runner: runner,
}
}
th.AddRun(taskStatusTestName, workspaceName, runner)
}
// Start the test in a separate goroutine so we can coordinate timing
testCtx, testCancel := timeoutStrategy.toContext(ctx)
defer testCancel()
testDone := make(chan error)
go func() {
testDone <- th.Run(testCtx)
}()
// Wait for all runners to connect
logger.Info(ctx, "waiting for all runners to connect")
waitCtx, waitCancel := context.WithTimeout(ctx, 5*time.Minute)
defer waitCancel()
connectDone := make(chan struct{})
go func() {
connectedWaitGroup.Wait()
close(connectDone)
}()
select {
case <-waitCtx.Done():
return xerrors.Errorf("timeout waiting for runners to connect")
case <-connectDone:
logger.Info(ctx, "all runners connected")
}
// Wait for baseline duration
logger.Info(ctx, "waiting for baseline duration", slog.F("duration", baselineDuration))
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(baselineDuration):
}
// Trigger all runners to start reporting
logger.Info(ctx, "triggering runners to start reporting task status")
close(startReporting)
// Wait for the test to complete
err = <-testDone
if err != nil {
return xerrors.Errorf("run test harness: %w", err)
}
res := th.Results()
for _, o := range outputs {
err = o.write(res, inv.Stdout)
if err != nil {
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
}
}
cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
defer cleanupCancel()
err = th.Cleanup(cleanupCtx)
if err != nil {
return xerrors.Errorf("cleanup tests: %w", err)
}
if res.TotalFail > 0 {
return xerrors.New("load test failed, see above for more details")
}
return nil
},
}
cmd.Options = serpent.OptionSet{
{
Flag: "count",
Description: "Number of concurrent runners to create.",
Default: "10",
Value: serpent.Int64Of(&count),
},
{
Flag: "template",
Description: "Name or UUID of the template to use for the scale test. The template MUST include a coder_external_agent and a coder_app.",
Default: "scaletest-task-status",
Value: serpent.StringOf(&template),
},
{
Flag: "workspace-name-prefix",
Description: "Prefix for workspace names (will be suffixed with index).",
Default: "scaletest-task-status",
Value: serpent.StringOf(&workspaceNamePrefix),
},
{
Flag: "app-slug",
Description: "Slug of the app designated as the AI Agent.",
Default: "ai-agent",
Value: serpent.StringOf(&appSlug),
},
{
Flag: "report-status-period",
Description: "Time between reporting task statuses.",
Default: "10s",
Value: serpent.DurationOf(&reportStatusPeriod),
},
{
Flag: "report-status-duration",
Description: "Total time to report task statuses after baseline.",
Default: "15m",
Value: serpent.DurationOf(&reportStatusDuration),
},
{
Flag: "baseline-duration",
Description: "Duration to wait after all runners connect before starting to report status.",
Default: "10m",
Value: serpent.DurationOf(&baselineDuration),
},
}
orgContext.AttachOptions(cmd)
output.attach(&cmd.Options)
tracingFlags.attach(&cmd.Options)
prometheusFlags.attach(&cmd.Options)
timeoutStrategy.attach(&cmd.Options)
cleanupStrategy.attach(&cmd.Options)
return cmd
}
+1 -1
View File
@@ -8,7 +8,7 @@ func (r *RootCmd) tasksCommand() *serpent.Command {
cmd := &serpent.Command{
Use: "task",
Aliases: []string{"tasks"},
Short: "Manage tasks",
Short: "Experimental task commands.",
Handler: func(i *serpent.Invocation) error {
return i.Command.HelpHandler(i)
},
@@ -28,27 +28,27 @@ func (r *RootCmd) taskCreate() *serpent.Command {
cmd := &serpent.Command{
Use: "create [input]",
Short: "Create a task",
Short: "Create an experimental task",
Long: FormatExamples(
Example{
Description: "Create a task with direct input",
Command: "coder task create \"Add authentication to the user service\"",
Command: "coder exp task create \"Add authentication to the user service\"",
},
Example{
Description: "Create a task with stdin input",
Command: "echo \"Add authentication to the user service\" | coder task create",
Command: "echo \"Add authentication to the user service\" | coder exp task create",
},
Example{
Description: "Create a task with a specific name",
Command: "coder task create --name task1 \"Add authentication to the user service\"",
Command: "coder exp task create --name task1 \"Add authentication to the user service\"",
},
Example{
Description: "Create a task from a specific template / preset",
Command: "coder task create --template backend-dev --preset \"My Preset\" \"Add authentication to the user service\"",
Command: "coder exp task create --template backend-dev --preset \"My Preset\" \"Add authentication to the user service\"",
},
Example{
Description: "Create a task for another user (requires appropriate permissions)",
Command: "coder task create --owner user@example.com \"Add authentication to the user service\"",
Command: "coder exp task create --owner user@example.com \"Add authentication to the user service\"",
},
),
Middleware: serpent.Chain(
@@ -111,7 +111,8 @@ func (r *RootCmd) taskCreate() *serpent.Command {
}
var (
ctx = inv.Context()
ctx = inv.Context()
expClient = codersdk.NewExperimentalClient(client)
taskInput string
templateVersionID uuid.UUID
@@ -207,7 +208,7 @@ func (r *RootCmd) taskCreate() *serpent.Command {
templateVersionPresetID = preset.ID
}
task, err := client.CreateTask(ctx, ownerArg, codersdk.CreateTaskRequest{
task, err := expClient.CreateTask(ctx, ownerArg, codersdk.CreateTaskRequest{
Name: taskName,
TemplateVersionID: templateVersionID,
TemplateVersionPresetID: templateVersionPresetID,
@@ -69,7 +69,7 @@ func TestTaskCreate(t *testing.T) {
ActiveVersionID: templateVersionID,
},
})
case fmt.Sprintf("/api/v2/tasks/%s", username):
case fmt.Sprintf("/api/experimental/tasks/%s", username):
var req codersdk.CreateTaskRequest
if !httpapi.Read(ctx, w, r, &req) {
return
@@ -329,7 +329,7 @@ func TestTaskCreate(t *testing.T) {
ctx = testutil.Context(t, testutil.WaitShort)
srv = httptest.NewServer(tt.handler(t, ctx))
client = codersdk.New(testutil.MustURL(t, srv.URL))
args = []string{"task", "create"}
args = []string{"exp", "task", "create"}
sb strings.Builder
err error
)
@@ -17,19 +17,19 @@ import (
func (r *RootCmd) taskDelete() *serpent.Command {
cmd := &serpent.Command{
Use: "delete <task> [<task> ...]",
Short: "Delete tasks",
Short: "Delete experimental tasks",
Long: FormatExamples(
Example{
Description: "Delete a single task.",
Command: "$ coder task delete task1",
Command: "$ coder exp task delete task1",
},
Example{
Description: "Delete multiple tasks.",
Command: "$ coder task delete task1 task2 task3",
Command: "$ coder exp task delete task1 task2 task3",
},
Example{
Description: "Delete a task without confirmation.",
Command: "$ coder task delete task4 --yes",
Command: "$ coder exp task delete task4 --yes",
},
),
Middleware: serpent.Chain(
@@ -44,10 +44,11 @@ func (r *RootCmd) taskDelete() *serpent.Command {
if err != nil {
return err
}
exp := codersdk.NewExperimentalClient(client)
var tasks []codersdk.Task
for _, identifier := range inv.Args {
task, err := client.TaskByIdentifier(ctx, identifier)
task, err := exp.TaskByIdentifier(ctx, identifier)
if err != nil {
return xerrors.Errorf("resolve task %q: %w", identifier, err)
}
@@ -70,7 +71,7 @@ func (r *RootCmd) taskDelete() *serpent.Command {
for i, task := range tasks {
display := displayList[i]
if err := client.DeleteTask(ctx, task.OwnerName, task.ID); err != nil {
if err := exp.DeleteTask(ctx, task.OwnerName, task.ID); err != nil {
return xerrors.Errorf("delete task %q: %w", display, err)
}
_, _ = fmt.Fprintln(
@@ -56,15 +56,20 @@ func TestExpTaskDelete(t *testing.T) {
taskID := uuid.MustParse(id1)
return func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks/me/exists":
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
c.nameResolves.Add(1)
httpapi.Write(r.Context(), w, http.StatusOK,
codersdk.Task{
httpapi.Write(r.Context(), w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: taskID,
Name: "exists",
OwnerName: "me",
})
case r.Method == http.MethodDelete && r.URL.Path == "/api/v2/tasks/me/"+id1:
}},
Count: 1,
})
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id1:
c.deleteCalls.Add(1)
w.WriteHeader(http.StatusAccepted)
default:
@@ -82,13 +87,13 @@ func TestExpTaskDelete(t *testing.T) {
buildHandler: func(c *testCounters) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks/me/"+id2:
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/"+id2:
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse(id2),
OwnerName: "me",
Name: "uuid-task",
})
case r.Method == http.MethodDelete && r.URL.Path == "/api/v2/tasks/me/"+id2:
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id2:
c.deleteCalls.Add(1)
w.WriteHeader(http.StatusAccepted)
default:
@@ -102,26 +107,32 @@ func TestExpTaskDelete(t *testing.T) {
name: "Multiple_YesFlag",
args: []string{"--yes", "first", id4},
buildHandler: func(c *testCounters) http.HandlerFunc {
firstID := uuid.MustParse(id3)
return func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks/me/first":
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
c.nameResolves.Add(1)
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse(id3),
Name: "first",
OwnerName: "me",
httpapi.Write(r.Context(), w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: firstID,
Name: "first",
OwnerName: "me",
}},
Count: 1,
})
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks/me/"+id4:
c.nameResolves.Add(1)
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/"+id4:
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse(id4),
OwnerName: "me",
Name: "uuid-task-4",
Name: "uuid-task-2",
})
case r.Method == http.MethodDelete && r.URL.Path == "/api/v2/tasks/me/"+id3:
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id3:
c.deleteCalls.Add(1)
w.WriteHeader(http.StatusAccepted)
case r.Method == http.MethodDelete && r.URL.Path == "/api/v2/tasks/me/"+id4:
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id4:
c.deleteCalls.Add(1)
w.WriteHeader(http.StatusAccepted)
default:
@@ -130,7 +141,7 @@ func TestExpTaskDelete(t *testing.T) {
}
},
wantDeleteCalls: 2,
wantNameResolves: 2,
wantNameResolves: 1,
wantDeletedMessage: 2,
},
{
@@ -140,7 +151,7 @@ func TestExpTaskDelete(t *testing.T) {
buildHandler: func(_ *testCounters) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
httpapi.Write(r.Context(), w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
@@ -163,14 +174,20 @@ func TestExpTaskDelete(t *testing.T) {
taskID := uuid.MustParse(id5)
return func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/tasks/me/bad":
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
c.nameResolves.Add(1)
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
ID: taskID,
Name: "bad",
OwnerName: "me",
httpapi.Write(r.Context(), w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: taskID,
Name: "bad",
OwnerName: "me",
}},
Count: 1,
})
case r.Method == http.MethodDelete && r.URL.Path == "/api/v2/tasks/me/bad":
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id5:
httpapi.InternalServerError(w, xerrors.New("boom"))
default:
httpapi.InternalServerError(w, xerrors.New("unwanted path: "+r.Method+" "+r.URL.Path))
@@ -193,7 +210,7 @@ func TestExpTaskDelete(t *testing.T) {
client := codersdk.New(testutil.MustURL(t, srv.URL))
args := append([]string{"task", "delete"}, tc.args...)
args := append([]string{"exp", "task", "delete"}, tc.args...)
inv, root := clitest.New(t, args...)
inv = inv.WithContext(ctx)
clitest.SetupConfig(t, client, root)
+8 -7
View File
@@ -69,27 +69,27 @@ func (r *RootCmd) taskList() *serpent.Command {
cmd := &serpent.Command{
Use: "list",
Short: "List tasks",
Short: "List experimental tasks",
Long: FormatExamples(
Example{
Description: "List tasks for the current user.",
Command: "coder task list",
Command: "coder exp task list",
},
Example{
Description: "List tasks for a specific user.",
Command: "coder task list --user someone-else",
Command: "coder exp task list --user someone-else",
},
Example{
Description: "List all tasks you can view.",
Command: "coder task list --all",
Command: "coder exp task list --all",
},
Example{
Description: "List all your running tasks.",
Command: "coder task list --status running",
Command: "coder exp task list --status running",
},
Example{
Description: "As above, but only show IDs.",
Command: "coder task list --status running --quiet",
Command: "coder exp task list --status running --quiet",
},
),
Aliases: []string{"ls"},
@@ -135,13 +135,14 @@ func (r *RootCmd) taskList() *serpent.Command {
}
ctx := inv.Context()
exp := codersdk.NewExperimentalClient(client)
targetUser := strings.TrimSpace(user)
if targetUser == "" && !all {
targetUser = codersdk.Me
}
tasks, err := client.Tasks(ctx, &codersdk.TasksFilter{
tasks, err := exp.Tasks(ctx, &codersdk.TasksFilter{
Owner: targetUser,
Status: codersdk.TaskStatus(statusFilter),
})
@@ -2,6 +2,7 @@ package cli_test
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"io"
@@ -18,7 +19,10 @@ import (
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbfake"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/util/slice"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/pty/ptytest"
@@ -39,22 +43,76 @@ func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UU
},
}).Do()
build := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
ws := database.WorkspaceTable{
OrganizationID: orgID,
OwnerID: ownerID,
TemplateID: tv.Template.ID,
}).
}
build := dbfake.WorkspaceBuild(t, db, ws).
Seed(database.WorkspaceBuild{
TemplateVersionID: tv.TemplateVersion.ID,
Transition: transition,
}).
WithAgent().
WithTask(database.TaskTable{
Prompt: prompt,
}, nil).
Do()
}).WithAgent().Do()
dbgen.WorkspaceBuildParameters(t, db, []database.WorkspaceBuildParameter{
{
WorkspaceBuildID: build.Build.ID,
Name: codersdk.AITaskPromptParameterName,
Value: prompt,
},
})
agents, err := db.GetWorkspaceAgentsByWorkspaceAndBuildNumber(
dbauthz.AsSystemRestricted(context.Background()),
database.GetWorkspaceAgentsByWorkspaceAndBuildNumberParams{
WorkspaceID: build.Workspace.ID,
BuildNumber: build.Build.BuildNumber,
},
)
require.NoError(t, err)
require.NotEmpty(t, agents)
agentID := agents[0].ID
return build.Task
// Create a workspace app and set it as the sidebar app.
app := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
AgentID: agentID,
Slug: "task-sidebar",
DisplayName: "Task Sidebar",
External: false,
})
// Update build flags to reference the sidebar app and HasAITask=true.
err = db.UpdateWorkspaceBuildFlagsByID(
dbauthz.AsSystemRestricted(context.Background()),
database.UpdateWorkspaceBuildFlagsByIDParams{
ID: build.Build.ID,
HasAITask: sql.NullBool{Bool: true, Valid: true},
HasExternalAgent: sql.NullBool{Bool: false, Valid: false},
SidebarAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
UpdatedAt: build.Build.UpdatedAt,
},
)
require.NoError(t, err)
// Create a task record in the tasks table for the new data model.
task := dbgen.Task(t, db, database.TaskTable{
OrganizationID: orgID,
OwnerID: ownerID,
Name: build.Workspace.Name,
WorkspaceID: uuid.NullUUID{UUID: build.Workspace.ID, Valid: true},
TemplateVersionID: tv.TemplateVersion.ID,
TemplateParameters: []byte("{}"),
Prompt: prompt,
CreatedAt: dbtime.Now(),
})
// Link the task to the workspace app.
dbgen.TaskWorkspaceApp(t, db, database.TaskWorkspaceApp{
TaskID: task.ID,
WorkspaceBuildNumber: build.Build.BuildNumber,
WorkspaceAgentID: uuid.NullUUID{UUID: agentID, Valid: true},
WorkspaceAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
})
return task
}
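// Illustrative summary, not part of this diff, of what makeAITask wires up
// under the new data model: the workspace build is flagged HasAITask and points
// at a "task-sidebar" workspace app; a tasks-table row references the workspace,
// template version, and prompt; and a TaskWorkspaceApp record ties the task to
// the build number, agent, and sidebar app. The relationships are inferred from
// the helper above, not from the schema itself.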
func TestExpTaskList(t *testing.T) {
@@ -69,7 +127,7 @@ func TestExpTaskList(t *testing.T) {
owner := coderdtest.CreateFirstUser(t, client)
memberClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
inv, root := clitest.New(t, "task", "list")
inv, root := clitest.New(t, "exp", "task", "list")
clitest.SetupConfig(t, memberClient, root)
pty := ptytest.New(t).Attach(inv)
@@ -93,7 +151,7 @@ func TestExpTaskList(t *testing.T) {
wantPrompt := "build me a web app"
task := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, wantPrompt)
inv, root := clitest.New(t, "task", "list", "--column", "id,name,status,initial prompt")
inv, root := clitest.New(t, "exp", "task", "list", "--column", "id,name,status,initial prompt")
clitest.SetupConfig(t, memberClient, root)
pty := ptytest.New(t).Attach(inv)
@@ -122,7 +180,7 @@ func TestExpTaskList(t *testing.T) {
pausedTask := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStop, "stop me please")
// Use JSON output to reliably validate filtering.
inv, root := clitest.New(t, "task", "list", "--status=paused", "--output=json")
inv, root := clitest.New(t, "exp", "task", "list", "--status=paused", "--output=json")
clitest.SetupConfig(t, memberClient, root)
ctx := testutil.Context(t, testutil.WaitShort)
@@ -153,7 +211,7 @@ func TestExpTaskList(t *testing.T) {
_ = makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "other-task")
task := makeAITask(t, db, owner.OrganizationID, owner.UserID, owner.UserID, database.WorkspaceTransitionStart, "me-task")
inv, root := clitest.New(t, "task", "list", "--user", "me")
inv, root := clitest.New(t, "exp", "task", "list", "--user", "me")
//nolint:gocritic // Owner client is intended here to smoke test the member task not showing up.
clitest.SetupConfig(t, client, root)
@@ -180,7 +238,7 @@ func TestExpTaskList(t *testing.T) {
task2 := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStop, "stop me please")
// Given: We add the `--quiet` flag
inv, root := clitest.New(t, "task", "list", "--quiet")
inv, root := clitest.New(t, "exp", "task", "list", "--quiet")
clitest.SetupConfig(t, memberClient, root)
ctx := testutil.Context(t, testutil.WaitShort)
@@ -224,7 +282,7 @@ func TestExpTaskList_OwnerCanListOthers(t *testing.T) {
t.Parallel()
// As the owner, list only member A tasks.
inv, root := clitest.New(t, "task", "list", "--user", memberAUser.Username, "--output=json")
inv, root := clitest.New(t, "exp", "task", "list", "--user", memberAUser.Username, "--output=json")
//nolint:gocritic // Owner client is intended here to allow member tasks to be listed.
clitest.SetupConfig(t, ownerClient, root)
@@ -252,7 +310,7 @@ func TestExpTaskList_OwnerCanListOthers(t *testing.T) {
// As the owner, list all tasks to verify both member tasks are present.
// Use JSON output to reliably validate filtering.
inv, root := clitest.New(t, "task", "list", "--all", "--output=json")
inv, root := clitest.New(t, "exp", "task", "list", "--all", "--output=json")
//nolint:gocritic // Owner client is intended here to allow all tasks to be listed.
clitest.SetupConfig(t, ownerClient, root)
+4 -3
View File
@@ -28,7 +28,7 @@ func (r *RootCmd) taskLogs() *serpent.Command {
Long: FormatExamples(
Example{
Description: "Show logs for a given task.",
Command: "coder task logs task1",
Command: "coder exp task logs task1",
}),
Middleware: serpent.Chain(
serpent.RequireNArgs(1),
@@ -41,15 +41,16 @@ func (r *RootCmd) taskLogs() *serpent.Command {
var (
ctx = inv.Context()
exp = codersdk.NewExperimentalClient(client)
identifier = inv.Args[0]
)
task, err := client.TaskByIdentifier(ctx, identifier)
task, err := exp.TaskByIdentifier(ctx, identifier)
if err != nil {
return xerrors.Errorf("resolve task %q: %w", identifier, err)
}
logs, err := client.TaskLogs(ctx, codersdk.Me, task.ID)
logs, err := exp.TaskLogs(ctx, codersdk.Me, task.ID)
if err != nil {
return xerrors.Errorf("get task logs: %w", err)
}
@@ -46,7 +46,7 @@ func Test_TaskLogs(t *testing.T) {
userClient := client // user already has access to their own workspace
var stdout strings.Builder
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
inv, root := clitest.New(t, "exp", "task", "logs", task.Name, "--output", "json")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -72,7 +72,7 @@ func Test_TaskLogs(t *testing.T) {
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "logs", task.ID.String(), "--output", "json")
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String(), "--output", "json")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -98,7 +98,7 @@ func Test_TaskLogs(t *testing.T) {
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "logs", task.ID.String())
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String())
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -121,7 +121,7 @@ func Test_TaskLogs(t *testing.T) {
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
var stdout strings.Builder
inv, root := clitest.New(t, "task", "logs", "doesnotexist")
inv, root := clitest.New(t, "exp", "task", "logs", "doesnotexist")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -139,7 +139,7 @@ func Test_TaskLogs(t *testing.T) {
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
var stdout strings.Builder
inv, root := clitest.New(t, "task", "logs", uuid.Nil.String())
inv, root := clitest.New(t, "exp", "task", "logs", uuid.Nil.String())
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -155,7 +155,7 @@ func Test_TaskLogs(t *testing.T) {
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsErr(assert.AnError))
userClient := client
inv, root := clitest.New(t, "task", "logs", task.ID.String())
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String())
clitest.SetupConfig(t, userClient, root)
err := inv.WithContext(ctx).Run()
+5 -4
View File
@@ -17,10 +17,10 @@ func (r *RootCmd) taskSend() *serpent.Command {
Short: "Send input to a task",
Long: FormatExamples(Example{
Description: "Send direct input to a task.",
Command: "coder task send task1 \"Please also add unit tests\"",
Command: "coder exp task send task1 \"Please also add unit tests\"",
}, Example{
Description: "Send input from stdin to a task.",
Command: "echo \"Please also add unit tests\" | coder task send task1 --stdin",
Command: "echo \"Please also add unit tests\" | coder exp task send task1 --stdin",
}),
Middleware: serpent.RequireRangeArgs(1, 2),
Options: serpent.OptionSet{
@@ -39,6 +39,7 @@ func (r *RootCmd) taskSend() *serpent.Command {
var (
ctx = inv.Context()
exp = codersdk.NewExperimentalClient(client)
identifier = inv.Args[0]
taskInput string
@@ -59,12 +60,12 @@ func (r *RootCmd) taskSend() *serpent.Command {
taskInput = inv.Args[1]
}
task, err := client.TaskByIdentifier(ctx, identifier)
task, err := exp.TaskByIdentifier(ctx, identifier)
if err != nil {
return xerrors.Errorf("resolve task: %w", err)
}
if err = client.TaskSend(ctx, codersdk.Me, task.ID, codersdk.TaskSendRequest{Input: taskInput}); err != nil {
if err = exp.TaskSend(ctx, codersdk.Me, task.ID, codersdk.TaskSendRequest{Input: taskInput}); err != nil {
return xerrors.Errorf("send input to task: %w", err)
}
@@ -30,7 +30,7 @@ func Test_TaskSend(t *testing.T) {
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "carry on with the task")
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "carry on with the task")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -46,7 +46,7 @@ func Test_TaskSend(t *testing.T) {
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.ID.String(), "carry on with the task")
inv, root := clitest.New(t, "exp", "task", "send", task.ID.String(), "carry on with the task")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -62,7 +62,7 @@ func Test_TaskSend(t *testing.T) {
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "--stdin")
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "--stdin")
inv.Stdout = &stdout
inv.Stdin = strings.NewReader("carry on with the task")
clitest.SetupConfig(t, userClient, root)
@@ -80,7 +80,7 @@ func Test_TaskSend(t *testing.T) {
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", "doesnotexist", "some task input")
inv, root := clitest.New(t, "exp", "task", "send", "doesnotexist", "some task input")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -98,7 +98,7 @@ func Test_TaskSend(t *testing.T) {
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", uuid.Nil.String(), "some task input")
inv, root := clitest.New(t, "exp", "task", "send", uuid.Nil.String(), "some task input")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -114,7 +114,7 @@ func Test_TaskSend(t *testing.T) {
userClient, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "some task input")
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "some task input")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
@@ -47,11 +47,11 @@ func (r *RootCmd) taskStatus() *serpent.Command {
Long: FormatExamples(
Example{
Description: "Show the status of a given task.",
Command: "coder task status task1",
Command: "coder exp task status task1",
},
Example{
Description: "Watch the status of a given task until it completes (idle or stopped).",
Command: "coder task status task1 --watch",
Command: "coder exp task status task1 --watch",
},
),
Use: "status",
@@ -83,9 +83,10 @@ func (r *RootCmd) taskStatus() *serpent.Command {
}
ctx := i.Context()
exp := codersdk.NewExperimentalClient(client)
identifier := i.Args[0]
task, err := client.TaskByIdentifier(ctx, identifier)
task, err := exp.TaskByIdentifier(ctx, identifier)
if err != nil {
return err
}
@@ -106,7 +107,7 @@ func (r *RootCmd) taskStatus() *serpent.Command {
// TODO: implement streaming updates instead of polling
lastStatusRow := tsr
for range t.C {
task, err := client.TaskByID(ctx, task.ID)
task, err := exp.TaskByID(ctx, task.ID)
if err != nil {
return err
}
@@ -36,9 +36,17 @@ func Test_TaskStatus(t *testing.T) {
hf: func(ctx context.Context, _ time.Time) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/v2/tasks/me/doesnotexist":
httpapi.ResourceNotFound(w)
return
case "/api/experimental/tasks":
if r.URL.Query().Get("q") == "owner:\"me\"" {
httpapi.Write(ctx, w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{},
Count: 0,
})
return
}
default:
t.Errorf("unexpected path: %s", r.URL.Path)
}
@@ -52,7 +60,35 @@ func Test_TaskStatus(t *testing.T) {
hf: func(ctx context.Context, now time.Time) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/v2/tasks/me/exists":
case "/api/experimental/tasks":
if r.URL.Query().Get("q") == "owner:\"me\"" {
httpapi.Write(ctx, w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
Name: "exists",
OwnerName: "me",
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
CreatedAt: now,
UpdatedAt: now,
CurrentState: &codersdk.TaskStateEntry{
State: codersdk.TaskStateWorking,
Timestamp: now,
Message: "Thinking furiously...",
},
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
Healthy: true,
},
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
Status: codersdk.TaskStatusActive,
}},
Count: 1,
})
return
}
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
@@ -88,22 +124,31 @@ func Test_TaskStatus(t *testing.T) {
var calls atomic.Int64
return func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/v2/tasks/me/exists":
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
Name: "exists",
OwnerName: "me",
WorkspaceStatus: codersdk.WorkspaceStatusPending,
CreatedAt: now.Add(-5 * time.Second),
UpdatedAt: now.Add(-5 * time.Second),
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
Healthy: true,
},
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
Status: codersdk.TaskStatusPending,
})
return
case "/api/v2/tasks/me/11111111-1111-1111-1111-111111111111":
case "/api/experimental/tasks":
if r.URL.Query().Get("q") == "owner:\"me\"" {
// Return initial task state for --watch test
httpapi.Write(ctx, w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
Name: "exists",
OwnerName: "me",
WorkspaceStatus: codersdk.WorkspaceStatusPending,
CreatedAt: now.Add(-5 * time.Second),
UpdatedAt: now.Add(-5 * time.Second),
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
Healthy: true,
},
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
Status: codersdk.TaskStatusPending,
}},
Count: 1,
})
return
}
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
defer calls.Add(1)
switch calls.Load() {
case 0:
@@ -189,7 +234,6 @@ func Test_TaskStatus(t *testing.T) {
"owner_id": "00000000-0000-0000-0000-000000000000",
"owner_name": "me",
"name": "exists",
"display_name": "Task exists",
"template_id": "00000000-0000-0000-0000-000000000000",
"template_version_id": "00000000-0000-0000-0000-000000000000",
"template_name": "",
@@ -219,19 +263,40 @@ func Test_TaskStatus(t *testing.T) {
ts := time.Date(2025, 8, 26, 12, 34, 56, 0, time.UTC)
return func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/v2/tasks/me/exists":
case "/api/experimental/tasks":
if r.URL.Query().Get("q") == "owner:\"me\"" {
httpapi.Write(ctx, w, http.StatusOK, struct {
Tasks []codersdk.Task `json:"tasks"`
Count int `json:"count"`
}{
Tasks: []codersdk.Task{{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
Name: "exists",
OwnerName: "me",
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
CreatedAt: ts,
UpdatedAt: ts,
CurrentState: &codersdk.TaskStateEntry{
State: codersdk.TaskStateWorking,
Timestamp: ts.Add(time.Second),
Message: "Thinking furiously...",
},
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
Healthy: true,
},
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
Status: codersdk.TaskStatusActive,
}},
Count: 1,
})
return
}
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
Name: "exists",
DisplayName: "Task exists",
OwnerName: "me",
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
Healthy: true,
},
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
CreatedAt: ts,
UpdatedAt: ts,
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
CreatedAt: ts,
UpdatedAt: ts,
CurrentState: &codersdk.TaskStateEntry{
State: codersdk.TaskStateWorking,
Timestamp: ts.Add(time.Second),
@@ -256,7 +321,7 @@ func Test_TaskStatus(t *testing.T) {
srv = httptest.NewServer(http.HandlerFunc(tc.hf(ctx, now)))
client = codersdk.New(testutil.MustURL(t, srv.URL))
sb = strings.Builder{}
args = []string{"task", "status", "--watch-interval", testutil.IntervalFast.String()}
args = []string{"exp", "task", "status", "--watch-interval", testutil.IntervalFast.String()}
)
t.Cleanup(srv.Close)
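The hunks above move the status test from /api/v2/tasks/... to the experimental endpoints and have the CLI poll through an experimental client. Below is a minimal sketch of that flow, using only the SDK calls visible in this diff (NewExperimentalClient, Tasks, TasksFilter, TaskByID); the helper name, filter contents, and error handling are illustrative assumptions, not code from this change.

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/codersdk"
)

// pollTaskStatus lists the caller's tasks via the experimental API and then
// refreshes one task by ID, mirroring the watch loop exercised above. The
// signatures are inferred from the calls shown in this diff.
func pollTaskStatus(ctx context.Context, client *codersdk.Client, id uuid.UUID) error {
	exp := codersdk.NewExperimentalClient(client)
	// List the caller's tasks; the fake server above answers this filtered by owner:"me".
	if _, err := exp.Tasks(ctx, &codersdk.TasksFilter{}); err != nil {
		return err
	}
	// Subsequent refreshes go through TaskByID, as in the watch loop above.
	if _, err := exp.TaskByID(ctx, id); err != nil {
		return err
	}
	return nil
}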
+25 -18
View File
@@ -53,6 +53,7 @@ func Test_Tasks(t *testing.T) {
taskName = strings.ReplaceAll(testutil.GetRandomName(t), "_", "-")
)
//nolint:paralleltest // The sub-tests of this test must be run sequentially.
for _, tc := range []struct {
name string
cmdArgs []string
@@ -60,14 +61,14 @@ func Test_Tasks(t *testing.T) {
}{
{
name: "create task",
cmdArgs: []string{"task", "create", "test task input for " + t.Name(), "--name", taskName, "--template", taskTpl.Name},
cmdArgs: []string{"exp", "task", "create", "test task input for " + t.Name(), "--name", taskName, "--template", taskTpl.Name},
assertFn: func(stdout string, userClient *codersdk.Client) {
require.Contains(t, stdout, taskName, "task name should be in output")
},
},
{
name: "list tasks after create",
cmdArgs: []string{"task", "list", "--output", "json"},
cmdArgs: []string{"exp", "task", "list", "--output", "json"},
assertFn: func(stdout string, userClient *codersdk.Client) {
var tasks []codersdk.Task
err := json.NewDecoder(strings.NewReader(stdout)).Decode(&tasks)
@@ -88,7 +89,7 @@ func Test_Tasks(t *testing.T) {
},
{
name: "get task status after create",
cmdArgs: []string{"task", "status", taskName, "--output", "json"},
cmdArgs: []string{"exp", "task", "status", taskName, "--output", "json"},
assertFn: func(stdout string, userClient *codersdk.Client) {
var task codersdk.Task
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
@@ -98,12 +99,12 @@ func Test_Tasks(t *testing.T) {
},
{
name: "send task message",
cmdArgs: []string{"task", "send", taskName, "hello"},
cmdArgs: []string{"exp", "task", "send", taskName, "hello"},
// Assertions for this happen in the fake agent API handler.
},
{
name: "read task logs",
cmdArgs: []string{"task", "logs", taskName, "--output", "json"},
cmdArgs: []string{"exp", "task", "logs", taskName, "--output", "json"},
assertFn: func(stdout string, userClient *codersdk.Client) {
var logs []codersdk.TaskLogEntry
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&logs), "should unmarshal task logs")
@@ -118,11 +119,12 @@ func Test_Tasks(t *testing.T) {
},
{
name: "delete task",
cmdArgs: []string{"task", "delete", taskName, "--yes"},
cmdArgs: []string{"exp", "task", "delete", taskName, "--yes"},
assertFn: func(stdout string, userClient *codersdk.Client) {
// The task should eventually no longer show up in the list of tasks
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
tasks, err := userClient.Tasks(ctx, &codersdk.TasksFilter{})
expClient := codersdk.NewExperimentalClient(userClient)
tasks, err := expClient.Tasks(ctx, &codersdk.TasksFilter{})
if !assert.NoError(t, err) {
return false
}
@@ -133,15 +135,16 @@ func Test_Tasks(t *testing.T) {
},
},
} {
t.Logf("test case: %q", tc.name)
var stdout strings.Builder
inv, root := clitest.New(t, tc.cmdArgs...)
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
require.NoError(t, inv.WithContext(ctx).Run(), tc.name)
if tc.assertFn != nil {
tc.assertFn(stdout.String(), userClient)
}
t.Run(tc.name, func(t *testing.T) {
var stdout strings.Builder
inv, root := clitest.New(t, tc.cmdArgs...)
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
require.NoError(t, inv.WithContext(ctx).Run())
if tc.assertFn != nil {
tc.assertFn(stdout.String(), userClient)
}
})
}
}
@@ -247,7 +250,8 @@ func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[st
template := createAITaskTemplate(t, client, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
wantPrompt := "test prompt"
task, err := userClient.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
exp := codersdk.NewExperimentalClient(userClient)
task, err := exp.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
TemplateVersionID: template.ActiveVersionID,
Input: wantPrompt,
Name: "test-task",
@@ -289,6 +293,7 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
{
Type: &proto.Response_Plan{
Plan: &proto.PlanComplete{
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
HasAiTasks: true,
},
},
@@ -323,7 +328,9 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
},
AiTasks: []*proto.AITask{
{
AppId: taskAppID.String(),
SidebarApp: &proto.AITaskSidebarApp{
Id: taskAppID.String(),
},
},
},
},
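The helper above provisions a task template with an AI-task sidebar app and creates the task through the experimental client. A minimal sketch of that creation call follows, based only on the CreateTask invocation shown in setupCLITaskTest; the prompt, name, and helper signature are placeholders/assumptions.

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/codersdk"
)

// createExampleTask mirrors the CreateTask call from setupCLITaskTest above.
// The prompt and name are placeholder values, not the test's exact inputs.
func createExampleTask(ctx context.Context, client *codersdk.Client, activeVersionID uuid.UUID) error {
	exp := codersdk.NewExperimentalClient(client)
	_, err := exp.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
		TemplateVersionID: activeVersionID,
		Input:             "test prompt",
		Name:              "test-task",
	})
	return err
}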
-426
View File
@@ -1,426 +0,0 @@
package cli_test
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"net/url"
"os"
"path"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/cli/config"
"github.com/coder/coder/v2/cli/sessionstore"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/pty/ptytest"
"github.com/coder/serpent"
)
// keyringTestServiceName generates a unique service name for keyring tests
// using the test name, a nanosecond timestamp, and a random suffix to prevent collisions.
func keyringTestServiceName(t *testing.T) string {
t.Helper()
var n uint32
err := binary.Read(rand.Reader, binary.BigEndian, &n)
if err != nil {
t.Fatal(err)
}
return fmt.Sprintf("%s_%v_%d", t.Name(), time.Now().UnixNano(), n)
}
type keyringTestEnv struct {
serviceName string
keyring sessionstore.Keyring
inv *serpent.Invocation
cfg config.Root
clientURL *url.URL
}
func setupKeyringTestEnv(t *testing.T, clientURL string, args ...string) keyringTestEnv {
t.Helper()
var root cli.RootCmd
cmd, err := root.Command(root.AGPL())
require.NoError(t, err)
serviceName := keyringTestServiceName(t)
root.WithKeyringServiceName(serviceName)
root.UseKeyringWithGlobalConfig()
inv, cfg := clitest.NewWithDefaultKeyringCommand(t, cmd, args...)
parsedURL, err := url.Parse(clientURL)
require.NoError(t, err)
backend := sessionstore.NewKeyringWithService(serviceName)
t.Cleanup(func() {
_ = backend.Delete(parsedURL)
})
return keyringTestEnv{serviceName, backend, inv, cfg, parsedURL}
}
func TestUseKeyring(t *testing.T) {
// Verify that the --use-keyring flag, by default, opts into using a keyring backend
// for storing session tokens instead of plain text files.
t.Parallel()
t.Run("Login", func(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
t.Skip("keyring is not supported on this OS")
}
// Create a test server
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
// Create a pty for interactive prompts
pty := ptytest.New(t)
// Create CLI invocation which defaults to using the keyring
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String())
inv := env.inv
inv.Stdin = pty.Input()
inv.Stdout = pty.Output()
// Run login in background
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := inv.Run()
assert.NoError(t, err)
}()
// Provide the token when prompted
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify that session file was NOT created (using keyring instead)
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring")
// Verify that the credential IS stored in OS keyring
cred, err := env.keyring.Read(env.clientURL)
require.NoError(t, err, "credential should be stored in OS keyring")
require.Equal(t, client.SessionToken(), cred, "stored token should match login token")
})
t.Run("Logout", func(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
t.Skip("keyring is not supported on this OS")
}
// Create a test server
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
// Create a pty for interactive prompts
pty := ptytest.New(t)
// First, login with the keyring (default)
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String(),
)
loginInv := env.inv
loginInv.Stdin = pty.Input()
loginInv.Stdout = pty.Output()
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := loginInv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify credential exists in OS keyring
cred, err := env.keyring.Read(env.clientURL)
require.NoError(t, err, "read credential should succeed before logout")
require.NotEmpty(t, cred, "credential should exist before logout")
// Now logout using the same keyring service name
var logoutRoot cli.RootCmd
logoutCmd, err := logoutRoot.Command(logoutRoot.AGPL())
require.NoError(t, err)
logoutRoot.WithKeyringServiceName(env.serviceName)
logoutRoot.UseKeyringWithGlobalConfig()
logoutInv, _ := clitest.NewWithDefaultKeyringCommand(t, logoutCmd,
"logout",
"--yes",
"--global-config", string(env.cfg),
)
var logoutOut bytes.Buffer
logoutInv.Stdout = &logoutOut
err = logoutInv.Run()
require.NoError(t, err, "logout should succeed")
// Verify the credential was deleted from OS keyring
_, err = env.keyring.Read(env.clientURL)
require.ErrorIs(t, err, os.ErrNotExist, "credential should be deleted from keyring after logout")
})
t.Run("DefaultFileStorage", func(t *testing.T) {
t.Parallel()
if runtime.GOOS != "linux" {
t.Skip("file storage is the default for Linux")
}
// Create a test server
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
// Create a pty for interactive prompts
pty := ptytest.New(t)
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String(),
)
inv := env.inv
inv.Stdin = pty.Input()
inv.Stdout = pty.Output()
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := inv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify that session file WAS created (not using keyring)
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.NoError(t, err, "session file should exist when NOT using --use-keyring on Linux")
// Read and verify the token from file
content, err := os.ReadFile(sessionFile)
require.NoError(t, err, "should be able to read session file")
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
})
t.Run("EnvironmentVariable", func(t *testing.T) {
t.Parallel()
// Create a test server
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
// Create a pty for interactive prompts
pty := ptytest.New(t)
// Login with the CODER_USE_KEYRING environment variable set to false to disable keyring usage,
// which should have the same behavior on all platforms.
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String(),
)
inv := env.inv
inv.Stdin = pty.Input()
inv.Stdout = pty.Output()
inv.Environ.Set("CODER_USE_KEYRING", "false")
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := inv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify that session file WAS created (not using keyring)
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.NoError(t, err, "session file should exist when CODER_USE_KEYRING set to false")
// Read and verify the token from file
content, err := os.ReadFile(sessionFile)
require.NoError(t, err, "should be able to read session file")
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
})
t.Run("DisableKeyringWithFlag", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
pty := ptytest.New(t)
// Login with --use-keyring=false to explicitly disable keyring usage, which
// should have the same behavior on all platforms.
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--use-keyring=false",
"--force-tty",
"--no-open",
client.URL.String(),
)
inv := env.inv
inv.Stdin = pty.Input()
inv.Stdout = pty.Output()
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := inv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify that session file WAS created (not using keyring)
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.NoError(t, err, "session file should exist when --use-keyring=false is specified")
// Read and verify the token from file
content, err := os.ReadFile(sessionFile)
require.NoError(t, err, "should be able to read session file")
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
})
}
func TestUseKeyringUnsupportedOS(t *testing.T) {
// Verify that on unsupported operating systems, file-based storage is used
// automatically even when --use-keyring is set to true (the default).
t.Parallel()
// Only run this on an unsupported OS.
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
t.Skipf("Skipping unsupported OS test on %s where keyring is supported", runtime.GOOS)
}
t.Run("LoginWithDefaultKeyring", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
pty := ptytest.New(t)
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String(),
)
inv := env.inv
inv.Stdin = pty.Input()
inv.Stdout = pty.Output()
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := inv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify that session file WAS created (automatic fallback to file storage)
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.NoError(t, err, "session file should exist due to automatic fallback to file storage")
content, err := os.ReadFile(sessionFile)
require.NoError(t, err, "should be able to read session file")
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
})
t.Run("LogoutWithDefaultKeyring", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, nil)
coderdtest.CreateFirstUser(t, client)
pty := ptytest.New(t)
// First login to create a session (will use file storage due to automatic fallback)
env := setupKeyringTestEnv(t, client.URL.String(),
"login",
"--force-tty",
"--no-open",
client.URL.String(),
)
loginInv := env.inv
loginInv.Stdin = pty.Input()
loginInv.Stdout = pty.Output()
doneChan := make(chan struct{})
go func() {
defer close(doneChan)
err := loginInv.Run()
assert.NoError(t, err)
}()
pty.ExpectMatch("Paste your token here:")
pty.WriteLine(client.SessionToken())
pty.ExpectMatch("Welcome to Coder")
<-doneChan
// Verify session file exists
sessionFile := path.Join(string(env.cfg), "session")
_, err := os.Stat(sessionFile)
require.NoError(t, err, "session file should exist before logout")
// Now logout - should succeed and delete the file
logoutEnv := setupKeyringTestEnv(t, client.URL.String(),
"logout",
"--yes",
"--global-config", string(env.cfg),
)
err = logoutEnv.inv.Run()
require.NoError(t, err, "logout should succeed with automatic file storage fallback")
_, err = os.Stat(sessionFile)
require.True(t, os.IsNotExist(err), "session file should be deleted after logout")
})
}
+5 -24
View File
@@ -19,7 +19,6 @@ import (
"github.com/coder/pretty"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/cli/sessionstore"
"github.com/coder/coder/v2/coderd/userpassword"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/serpent"
@@ -115,11 +114,9 @@ func (r *RootCmd) loginWithPassword(
}
sessionToken := resp.SessionToken
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
config := r.createConfig()
err = config.Session().Write(sessionToken)
if err != nil {
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
return errKeyringNotSupported
}
return xerrors.Errorf("write session token: %w", err)
}
@@ -152,15 +149,11 @@ func (r *RootCmd) login() *serpent.Command {
useTokenForSession bool
)
cmd := &serpent.Command{
Use: "login [<url>]",
Short: "Authenticate with Coder deployment",
Long: "By default, the session token is stored in the operating system keyring on " +
"macOS and Windows and a plain text file on Linux. Use the --use-keyring flag " +
"or CODER_USE_KEYRING environment variable to change the storage mechanism.",
Use: "login [<url>]",
Short: "Authenticate with Coder deployment",
Middleware: serpent.RequireRangeArgs(0, 1),
Handler: func(inv *serpent.Invocation) error {
ctx := inv.Context()
rawURL := ""
var urlSource string
@@ -205,15 +198,6 @@ func (r *RootCmd) login() *serpent.Command {
return err
}
// Check keyring availability before prompting the user for a token to fail fast.
if r.useKeyring {
backend := r.ensureTokenBackend()
_, err := backend.Read(client.URL)
if err != nil && xerrors.Is(err, sessionstore.ErrNotImplemented) {
return errKeyringNotSupported
}
}
hasFirstUser, err := client.HasFirstUser(ctx)
if err != nil {
return xerrors.Errorf("Failed to check server %q for first user, is the URL correct and is coder accessible from your browser? Error - has initial user: %w", serverURL.String(), err)
@@ -410,11 +394,8 @@ func (r *RootCmd) login() *serpent.Command {
}
config := r.createConfig()
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
err = config.Session().Write(sessionToken)
if err != nil {
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
return errKeyringNotSupported
}
return xerrors.Errorf("write session token: %w", err)
}
err = config.URL().Write(serverURL.String())
+3 -8
View File
@@ -8,7 +8,6 @@ import (
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/cli/sessionstore"
"github.com/coder/serpent"
)
@@ -47,15 +46,11 @@ func (r *RootCmd) logout() *serpent.Command {
errors = append(errors, xerrors.Errorf("remove URL file: %w", err))
}
err = r.ensureTokenBackend().Delete(client.URL)
err = config.Session().Delete()
// Only throw an error if the session configuration file is present;
// otherwise the user is already logged out, and we proceed
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
errors = append(errors, errKeyringNotSupported)
} else {
errors = append(errors, xerrors.Errorf("remove session token: %w", err))
}
if err != nil && !os.IsNotExist(err) {
errors = append(errors, xerrors.Errorf("remove session file: %w", err))
}
err = config.Organization().Delete()
+11 -80
View File
@@ -37,7 +37,6 @@ import (
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/cli/config"
"github.com/coder/coder/v2/cli/gitauth"
"github.com/coder/coder/v2/cli/sessionstore"
"github.com/coder/coder/v2/cli/telemetry"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/agentsdk"
@@ -55,8 +54,6 @@ var (
// ErrSilent is a sentinel error that tells the command handler to just exit with a non-zero error, but not print
// anything.
ErrSilent = xerrors.New("silent error")
errKeyringNotSupported = xerrors.New("keyring storage is not supported on this operating system; omit --use-keyring to use file-based storage")
)
const (
@@ -71,14 +68,12 @@ const (
varVerbose = "verbose"
varDisableDirect = "disable-direct-connections"
varDisableNetworkTelemetry = "disable-network-telemetry"
varUseKeyring = "use-keyring"
notLoggedInMessage = "You are not logged in. Try logging in using '%s login <url>'."
envNoVersionCheck = "CODER_NO_VERSION_WARNING"
envNoFeatureWarning = "CODER_NO_FEATURE_WARNING"
envSessionToken = "CODER_SESSION_TOKEN"
envUseKeyring = "CODER_USE_KEYRING"
//nolint:gosec
envAgentToken = "CODER_AGENT_TOKEN"
//nolint:gosec
@@ -104,7 +99,6 @@ func (r *RootCmd) CoreSubcommands() []*serpent.Command {
r.resetPassword(),
r.sharing(),
r.state(),
r.tasksCommand(),
r.templates(),
r.tokens(),
r.users(),
@@ -150,7 +144,7 @@ func (r *RootCmd) AGPLExperimental() []*serpent.Command {
r.mcpCommand(),
r.promptExample(),
r.rptyCommand(),
r.syncCommand(),
r.tasksCommand(),
r.boundary(),
}
}
@@ -480,17 +474,6 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
Value: serpent.BoolOf(&r.disableNetworkTelemetry),
Group: globalGroup,
},
{
Flag: varUseKeyring,
Env: envUseKeyring,
Description: "Store and retrieve session tokens using the operating system " +
"keyring. This flag is ignored and file-based storage is used when " +
"--global-config is set or keyring usage is not supported on the current " +
"platform. Set to false to force file-based storage on supported platforms.",
Default: "true",
Value: serpent.BoolOf(&r.useKeyring),
Group: globalGroup,
},
{
Flag: "debug-http",
Description: "Debug codersdk HTTP requests.",
@@ -525,7 +508,6 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
type RootCmd struct {
clientURL *url.URL
token string
tokenBackend sessionstore.Backend
globalConfig string
header []string
headerCommand string
@@ -537,12 +519,9 @@ type RootCmd struct {
disableDirect bool
debugHTTP bool
disableNetworkTelemetry bool
noVersionCheck bool
noFeatureWarning bool
useKeyring bool
keyringServiceName string
useKeyringWithGlobalConfig bool
disableNetworkTelemetry bool
noVersionCheck bool
noFeatureWarning bool
}
// InitClient creates and configures a new client with authentication, telemetry,
@@ -570,19 +549,14 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
return nil, err
}
}
// Read the token stored on disk.
if r.token == "" {
tok, err := r.ensureTokenBackend().Read(r.clientURL)
r.token, err = conf.Session().Read()
// Even if there isn't a token, we don't care.
// Some API routes can be unauthenticated.
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
return nil, errKeyringNotSupported
}
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if tok != "" {
r.token = tok
}
}
// Configure HTTP client with transport wrappers
@@ -614,6 +588,7 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
// This allows commands to run without requiring authentication, but still use auth if available.
func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, error) {
conf := r.createConfig()
var err error
// Read the client URL stored on disk.
if r.clientURL == nil || r.clientURL.String() == "" {
rawURL, err := conf.URL().Read()
@@ -630,19 +605,14 @@ func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, erro
}
}
}
// Read the token stored on disk.
if r.token == "" {
tok, err := r.ensureTokenBackend().Read(r.clientURL)
r.token, err = conf.Session().Read()
// Even if there isn't a token, we don't care.
// Some API routes can be unauthenticated.
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
return nil, errKeyringNotSupported
}
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if tok != "" {
r.token = tok
}
}
// Only configure the client if we have a URL
@@ -718,45 +688,6 @@ func (r *RootCmd) createUnauthenticatedClient(ctx context.Context, serverURL *ur
return client, nil
}
// ensureTokenBackend returns the session token storage backend, creating it if necessary.
// This must be called after flags are parsed so we can respect the value of the --use-keyring
// flag.
func (r *RootCmd) ensureTokenBackend() sessionstore.Backend {
if r.tokenBackend == nil {
// Checking for the --global-config directory being set is a bit wonky but necessary
// to allow extensions that invoke the CLI with this flag (e.g. VS Code) to continue
// working without modification. In the future we should modify these extensions to
// either access the credential in the keyring (like Coder Desktop) or use some other
// approach that doesn't rely on the session token being stored on disk.
assumeExtensionInUse := r.globalConfig != config.DefaultDir() && !r.useKeyringWithGlobalConfig
keyringSupported := runtime.GOOS == "windows" || runtime.GOOS == "darwin"
if r.useKeyring && !assumeExtensionInUse && keyringSupported {
serviceName := sessionstore.DefaultServiceName
if r.keyringServiceName != "" {
serviceName = r.keyringServiceName
}
r.tokenBackend = sessionstore.NewKeyringWithService(serviceName)
} else {
r.tokenBackend = sessionstore.NewFile(r.createConfig)
}
}
return r.tokenBackend
}
// WithKeyringServiceName sets a custom keyring service name for testing purposes.
// This allows tests to use isolated keyring storage while still exercising the
// genuine storage backend selection logic in ensureTokenBackend().
func (r *RootCmd) WithKeyringServiceName(serviceName string) {
r.keyringServiceName = serviceName
}
// UseKeyringWithGlobalConfig enables the use of the keyring storage backend
// when the --global-config directory is set. This is only intended as an override
// for tests, which require specifying the global config directory for test isolation.
func (r *RootCmd) UseKeyringWithGlobalConfig() {
r.useKeyringWithGlobalConfig = true
}
type AgentAuth struct {
// Agent Client config
agentToken string
-25
View File
@@ -72,31 +72,6 @@ func TestCommandHelp(t *testing.T) {
Name: "coder provisioner jobs list --output json",
Cmd: []string{"provisioner", "jobs", "list", "--output", "json"},
},
// TODO (SasSwart): Remove these once the sync commands are promoted out of experimental.
clitest.CommandHelpCase{
Name: "coder exp sync --help",
Cmd: []string{"exp", "sync", "--help"},
},
clitest.CommandHelpCase{
Name: "coder exp sync ping --help",
Cmd: []string{"exp", "sync", "ping", "--help"},
},
clitest.CommandHelpCase{
Name: "coder exp sync start --help",
Cmd: []string{"exp", "sync", "start", "--help"},
},
clitest.CommandHelpCase{
Name: "coder exp sync want --help",
Cmd: []string{"exp", "sync", "want", "--help"},
},
clitest.CommandHelpCase{
Name: "coder exp sync complete --help",
Cmd: []string{"exp", "sync", "complete", "--help"},
},
clitest.CommandHelpCase{
Name: "coder exp sync status --help",
Cmd: []string{"exp", "sync", "status", "--help"},
},
))
}
+12 -20
View File
@@ -1029,7 +1029,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
defer shutdownConns()
// Ensures that old database entries are cleaned up over time!
purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database, options.DeploymentValues, quartz.NewReal())
purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database, quartz.NewReal())
defer purger.Close()
// Updates workspace usage
@@ -1476,7 +1476,6 @@ func newProvisionerDaemon(
Listener: terraformServer,
Logger: provisionerLogger,
WorkDirectory: workDir,
Experiments: coderAPI.Experiments,
},
CachePath: tfDir,
Tracer: tracer,
@@ -2143,33 +2142,21 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
}
stdlibLogger := slog.Stdlib(ctx, logger.Named("postgres"), slog.LevelDebug)
// If the port is not defined, an available port will be found dynamically. This has
// implications in CI because there is no way to tell Postgres to use an ephemeral
// port, so to avoid flaky tests in CI we need to retry EmbeddedPostgres.Start in
// case of a race condition where the port we quickly listen on and close in
// embeddedPostgresURL() is not free by the time the embedded postgres starts up.
// The maximum retry attempts _should_ cover most cases where port conflicts occur
// in CI and cause flaky tests.
// If the port is not defined, an available port will be found dynamically.
maxAttempts := 1
_, err = cfg.PostgresPort().Read()
// Important: if retryPortDiscovery is changed to not include testing.Testing(),
// the retry logic below also needs to be updated to ensure we don't delete an
// existing database
retryPortDiscovery := errors.Is(err, os.ErrNotExist) && testing.Testing()
if retryPortDiscovery {
// There is no way to tell Postgres to use an ephemeral port, so in order to avoid
// flaky tests in CI we need to retry EmbeddedPostgres.Start in case of a race
// condition where the port we quickly listen on and close in embeddedPostgresURL()
// is not free by the time the embedded postgres starts up. This maximum _should_
// cover most cases where port conflicts occur in CI and cause flaky tests.
maxAttempts = 3
}
var startErr error
for attempt := 0; attempt < maxAttempts; attempt++ {
if retryPortDiscovery && attempt > 0 {
// Clean up the data and runtime directories and the port file from the
// previous failed attempt to ensure a clean slate for the next attempt.
_ = os.RemoveAll(filepath.Join(cfg.PostgresPath(), "data"))
_ = os.RemoveAll(filepath.Join(cfg.PostgresPath(), "runtime"))
_ = cfg.PostgresPort().Delete()
}
// Ensure a password and port have been generated.
connectionURL, err := embeddedPostgresURL(cfg)
if err != nil {
@@ -2216,6 +2203,11 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
slog.F("port", pgPort),
slog.Error(startErr),
)
if retryPortDiscovery {
// Since a retry is needed, wipe the stored port so that a new one is generated at the beginning of the next attempt.
_ = cfg.PostgresPort().Delete()
}
}
return "", nil, xerrors.Errorf("failed to start built-in PostgreSQL after %d attempts. "+
-237
View File
@@ -1,237 +0,0 @@
// Package sessionstore provides CLI session token storage mechanisms.
// Operating system keyring storage is intended to have compatibility with other Coder
// applications (e.g. Coder Desktop, Coder provider for JetBrains Toolbox, etc) so that
// applications can read/write the same credential stored in the keyring.
//
// Note that we aren't using the existing Go package zalando/go-keyring here for a few
// reasons. 1) It prescribes the format of the target credential name in the OS keyrings,
// which complicates compatibility with other Coder applications. 2) It uses init
// functions that make it difficult to test against. As a result, the OS keyring
// implementations may be adapted from zalando/go-keyring source (e.g. Windows).
package sessionstore
import (
"encoding/json"
"errors"
"net/url"
"os"
"strings"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/config"
)
// Backend is a storage backend for session tokens.
type Backend interface {
// Read returns the session token for the given server URL or an error, if any. It
// will return os.ErrNotExist if no token exists for the given URL.
Read(serverURL *url.URL) (string, error)
// Write stores the session token for the given server URL.
Write(serverURL *url.URL, token string) error
// Delete removes the session token for the given server URL or an error, if any.
// It will return os.ErrNotExist error if no token exists to delete.
Delete(serverURL *url.URL) error
}
var (
// ErrSetDataTooBig is returned if `keyringProvider.Set` was called with too much data.
// On macOS: The combination of service, username & password should not exceed ~3000 bytes
// On Windows: The service is limited to 32KiB while the password is limited to 2560 bytes
ErrSetDataTooBig = xerrors.New("data passed to Set was too big")
// ErrNotImplemented represents when keyring usage is not implemented on the current
// operating system.
ErrNotImplemented = xerrors.New("not implemented")
)
const (
// DefaultServiceName is the service name used in keyrings for storing Coder CLI session
// tokens.
DefaultServiceName = "coder-v2-credentials"
)
// keyringProvider represents an operating system keyring. The expectation
// is these methods operate on the user/login keyring.
type keyringProvider interface {
// Set stores the given credential for a service name in the operating system
// keyring.
Set(service, credential string) error
// Get retrieves the credential from the keyring. It must return os.ErrNotExist
// if the credential is not found.
Get(service string) ([]byte, error)
// Delete deletes the credential from the keyring. It must return os.ErrNotExist
// if the credential is not found.
Delete(service string) error
}
// credential represents a single credential entry.
type credential struct {
CoderURL string `json:"coder_url"`
APIToken string `json:"api_token"`
}
// credentialsMap represents the JSON structure stored in the operating system keyring.
// It supports storing multiple credentials for different server URLs.
type credentialsMap map[string]credential
// normalizeHost returns a normalized version of the URL host for use as a map key.
func normalizeHost(u *url.URL) (string, error) {
if u == nil || u.Host == "" {
return "", xerrors.New("nil server URL")
}
return strings.TrimSpace(strings.ToLower(u.Host)), nil
}
// parseCredentialsJSON parses the JSON from the keyring into a credentialsMap.
func parseCredentialsJSON(jsonData []byte) (credentialsMap, error) {
if len(jsonData) == 0 {
return make(credentialsMap), nil
}
var creds credentialsMap
if err := json.Unmarshal(jsonData, &creds); err != nil {
return nil, xerrors.Errorf("unmarshal credentials: %w", err)
}
return creds, nil
}
// Keyring is a Backend that exclusively stores the session token in the operating
// system keyring. Happy path usage of this type should start with NewKeyring.
// It stores a JSON object in the keyring that supports multiple credentials for
// different server URLs, providing compatibility with Coder Desktop and other Coder
// applications.
type Keyring struct {
provider keyringProvider
serviceName string
}
// NewKeyringWithService creates a Keyring Backend that stores credentials under the
// specified service name. Generally, DefaultServiceName should be provided as the service
// name except in tests which may need parameterization to avoid conflicting keyring use.
func NewKeyringWithService(serviceName string) Keyring {
return Keyring{
provider: operatingSystemKeyring{},
serviceName: serviceName,
}
}
func (o Keyring) Read(serverURL *url.URL) (string, error) {
host, err := normalizeHost(serverURL)
if err != nil {
return "", err
}
credJSON, err := o.provider.Get(o.serviceName)
if err != nil {
return "", err
}
if len(credJSON) == 0 {
return "", os.ErrNotExist
}
creds, err := parseCredentialsJSON(credJSON)
if err != nil {
return "", xerrors.Errorf("read: parse existing credentials: %w", err)
}
// Return the credential for the specified URL
cred, ok := creds[host]
if !ok {
return "", os.ErrNotExist
}
return cred.APIToken, nil
}
func (o Keyring) Write(serverURL *url.URL, token string) error {
host, err := normalizeHost(serverURL)
if err != nil {
return err
}
existingJSON, err := o.provider.Get(o.serviceName)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return xerrors.Errorf("read existing credentials: %w", err)
}
creds, err := parseCredentialsJSON(existingJSON)
if err != nil {
return xerrors.Errorf("write: parse existing credentials: %w", err)
}
// Upsert the credential for this URL.
creds[host] = credential{
CoderURL: host,
APIToken: token,
}
credsJSON, err := json.Marshal(creds)
if err != nil {
return xerrors.Errorf("marshal credentials: %w", err)
}
err = o.provider.Set(o.serviceName, string(credsJSON))
if err != nil {
return xerrors.Errorf("write credentials to keyring: %w", err)
}
return nil
}
func (o Keyring) Delete(serverURL *url.URL) error {
host, err := normalizeHost(serverURL)
if err != nil {
return err
}
existingJSON, err := o.provider.Get(o.serviceName)
if err != nil {
return err
}
creds, err := parseCredentialsJSON(existingJSON)
if err != nil {
return xerrors.Errorf("failed to parse existing credentials: %w", err)
}
if _, ok := creds[host]; !ok {
return os.ErrNotExist
}
delete(creds, host)
// Delete the entire keyring entry when no credentials remain.
if len(creds) == 0 {
return o.provider.Delete(o.serviceName)
}
// Write back the updated credentials map.
credsJSON, err := json.Marshal(creds)
if err != nil {
return xerrors.Errorf("failed to marshal credentials: %w", err)
}
return o.provider.Set(o.serviceName, string(credsJSON))
}
// File is a Backend that exclusively stores the session token in a file on disk.
type File struct {
config func() config.Root
}
func NewFile(f func() config.Root) *File {
return &File{config: f}
}
func (f *File) Read(_ *url.URL) (string, error) {
return f.config().Session().Read()
}
func (f *File) Write(_ *url.URL, token string) error {
return f.config().Session().Write(token)
}
func (f *File) Delete(_ *url.URL) error {
return f.config().Session().Delete()
}
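The package above is removed by this change. For context, here is a minimal sketch of how its Backend API was used, based only on the exported functions defined above; the server URL and token are placeholders.

package example

import (
	"net/url"

	"github.com/coder/coder/v2/cli/sessionstore"
)

// keyringRoundTrip writes, reads back, and deletes a token using the removed
// keyring backend. All values are placeholders.
func keyringRoundTrip() (string, error) {
	backend := sessionstore.NewKeyringWithService(sessionstore.DefaultServiceName)
	serverURL, err := url.Parse("https://coder.example.com")
	if err != nil {
		return "", err
	}
	if err := backend.Write(serverURL, "example-session-token"); err != nil {
		return "", err
	}
	token, err := backend.Read(serverURL)
	if err != nil {
		return "", err
	}
	return token, backend.Delete(serverURL)
}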
-105
View File
@@ -1,105 +0,0 @@
//go:build darwin
package sessionstore
import (
"encoding/base64"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strings"
)
const (
// fixedUsername is the fixed username used for all keychain entries.
// Since our interface only uses service names, we use a constant username.
fixedUsername = "coder-login-credentials"
execPathKeychain = "/usr/bin/security"
notFoundStr = "could not be found"
)
// operatingSystemKeyring implements keyringProvider for macOS.
// It is largely adapted from the zalando/go-keyring package.
type operatingSystemKeyring struct{}
func (operatingSystemKeyring) Set(service, credential string) error {
// If the added secret has multiple lines or contains non-ASCII characters,
// macOS will hex-encode it on return. To avoid getting garbage back, we
// base64-encode all passwords.
password := base64.StdEncoding.EncodeToString([]byte(credential))
cmd := exec.Command(execPathKeychain, "-i")
stdIn, err := cmd.StdinPipe()
if err != nil {
return err
}
if err = cmd.Start(); err != nil {
return err
}
command := fmt.Sprintf("add-generic-password -U -s %s -a %s -w %s\n",
shellEscape(service),
shellEscape(fixedUsername),
shellEscape(password))
if len(command) > 4096 {
return ErrSetDataTooBig
}
if _, err := io.WriteString(stdIn, command); err != nil {
return err
}
if err = stdIn.Close(); err != nil {
return err
}
return cmd.Wait()
}
func (operatingSystemKeyring) Get(service string) ([]byte, error) {
out, err := exec.Command(
execPathKeychain,
"find-generic-password",
"-s", service,
"-wa", fixedUsername).CombinedOutput()
if err != nil {
if strings.Contains(string(out), notFoundStr) {
return nil, os.ErrNotExist
}
return nil, err
}
trimStr := strings.TrimSpace(string(out))
return base64.StdEncoding.DecodeString(trimStr)
}
func (operatingSystemKeyring) Delete(service string) error {
out, err := exec.Command(
execPathKeychain,
"delete-generic-password",
"-s", service,
"-a", fixedUsername).CombinedOutput()
if strings.Contains(string(out), notFoundStr) {
return os.ErrNotExist
}
return err
}
// shellEscape returns a shell-escaped version of the string s.
// This is adapted from github.com/zalando/go-keyring/internal/shellescape.
func shellEscape(s string) string {
if len(s) == 0 {
return "''"
}
pattern := regexp.MustCompile(`[^\w@%+=:,./-]`)
if pattern.MatchString(s) {
return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'"
}
return s
}
@@ -1,34 +0,0 @@
//go:build darwin
package sessionstore_test
import (
"encoding/base64"
"os/exec"
"testing"
)
const (
execPathKeychain = "/usr/bin/security"
fixedUsername = "coder-login-credentials"
)
func readRawKeychainCredential(t *testing.T, service string) []byte {
t.Helper()
out, err := exec.Command(
execPathKeychain,
"find-generic-password",
"-s", service,
"-wa", fixedUsername).CombinedOutput()
if err != nil {
t.Fatal(err)
}
dst := make([]byte, base64.StdEncoding.DecodedLen(len(out)))
n, err := base64.StdEncoding.Decode(dst, out)
if err != nil {
t.Fatal(err)
}
return dst[:n]
}
@@ -1,121 +0,0 @@
package sessionstore
import (
"encoding/json"
"net/url"
"testing"
"github.com/stretchr/testify/require"
)
func TestNormalizeHost(t *testing.T) {
t.Parallel()
tests := []struct {
name string
url *url.URL
want string
wantErr bool
}{
{
name: "StandardHost",
url: &url.URL{Host: "coder.example.com"},
want: "coder.example.com",
},
{
name: "HostWithPort",
url: &url.URL{Host: "coder.example.com:8080"},
want: "coder.example.com:8080",
},
{
name: "UppercaseHost",
url: &url.URL{Host: "CODER.EXAMPLE.COM"},
want: "coder.example.com",
},
{
name: "HostWithWhitespace",
url: &url.URL{Host: " coder.example.com "},
want: "coder.example.com",
},
{
name: "NilURL",
url: nil,
want: "",
wantErr: true,
},
{
name: "EmptyHost",
url: &url.URL{Host: ""},
want: "",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := normalizeHost(tt.url)
if tt.wantErr {
require.Error(t, err)
return
}
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func TestParseCredentialsJSON(t *testing.T) {
t.Parallel()
t.Run("Empty", func(t *testing.T) {
t.Parallel()
creds, err := parseCredentialsJSON(nil)
require.NoError(t, err)
require.NotNil(t, creds)
require.Empty(t, creds)
})
t.Run("NewFormat", func(t *testing.T) {
t.Parallel()
jsonData := []byte(`{
"coder1.example.com": {"coder_url": "coder1.example.com", "api_token": "token1"},
"coder2.example.com": {"coder_url": "coder2.example.com", "api_token": "token2"}
}`)
creds, err := parseCredentialsJSON(jsonData)
require.NoError(t, err)
require.Len(t, creds, 2)
require.Equal(t, "token1", creds["coder1.example.com"].APIToken)
require.Equal(t, "token2", creds["coder2.example.com"].APIToken)
})
t.Run("InvalidJSON", func(t *testing.T) {
t.Parallel()
jsonData := []byte(`{invalid json}`)
_, err := parseCredentialsJSON(jsonData)
require.Error(t, err)
})
}
func TestCredentialsMap_RoundTrip(t *testing.T) {
t.Parallel()
creds := credentialsMap{
"coder1.example.com": {
CoderURL: "coder1.example.com",
APIToken: "token1",
},
"coder2.example.com:8080": {
CoderURL: "coder2.example.com:8080",
APIToken: "token2",
},
}
jsonData, err := json.Marshal(creds)
require.NoError(t, err)
parsed, err := parseCredentialsJSON(jsonData)
require.NoError(t, err)
require.Equal(t, creds, parsed)
}
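The tests above pin the JSON document the keyring backend stored: a map keyed by normalized host, each entry carrying coder_url and api_token. A minimal sketch reproducing that shape follows; the host and token are placeholders.

package example

import (
	"encoding/json"
	"fmt"
)

// storedCredential mirrors the per-host entry the removed keyring backend
// persisted, as pinned by the round-trip test above.
type storedCredential struct {
	CoderURL string `json:"coder_url"`
	APIToken string `json:"api_token"`
}

func printStoredFormat() error {
	creds := map[string]storedCredential{
		"coder1.example.com": {CoderURL: "coder1.example.com", APIToken: "token1"},
	}
	out, err := json.Marshal(creds)
	if err != nil {
		return err
	}
	// Output: {"coder1.example.com":{"coder_url":"coder1.example.com","api_token":"token1"}}
	fmt.Println(string(out))
	return nil
}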
-17
View File
@@ -1,17 +0,0 @@
//go:build !windows && !darwin
package sessionstore
type operatingSystemKeyring struct{}
func (operatingSystemKeyring) Set(_, _ string) error {
return ErrNotImplemented
}
func (operatingSystemKeyring) Get(_ string) ([]byte, error) {
return nil, ErrNotImplemented
}
func (operatingSystemKeyring) Delete(_ string) error {
return ErrNotImplemented
}
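On platforms without a keyring implementation, the stub above returns ErrNotImplemented; the login and logout changes earlier in this diff translate that into the user-facing errKeyringNotSupported. A minimal sketch of that mapping, using only identifiers visible in this diff; the helper name is an assumption.

package example

import (
	"net/url"

	"golang.org/x/xerrors"

	"github.com/coder/coder/v2/cli/sessionstore"
)

var errKeyringNotSupported = xerrors.New("keyring storage is not supported on this operating system; omit --use-keyring to use file-based storage")

// writeSessionToken mirrors how the removed login path mapped the stub's
// ErrNotImplemented to the friendlier errKeyringNotSupported.
func writeSessionToken(backend sessionstore.Backend, serverURL *url.URL, token string) error {
	if err := backend.Write(serverURL, token); err != nil {
		if xerrors.Is(err, sessionstore.ErrNotImplemented) {
			return errKeyringNotSupported
		}
		return xerrors.Errorf("write session token: %w", err)
	}
	return nil
}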
@@ -1,10 +0,0 @@
//go:build !windows && !darwin
package sessionstore_test
import "testing"
func readRawKeychainCredential(t *testing.T, _ string) []byte {
t.Fatal("not implemented")
return nil
}
-408
View File
@@ -1,408 +0,0 @@
package sessionstore_test
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"path"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/config"
"github.com/coder/coder/v2/cli/sessionstore"
)
type storedCredentials map[string]struct {
CoderURL string `json:"coder_url"`
APIToken string `json:"api_token"`
}
// Generate a test service name for use with the OS keyring. It uses a combination
// of the test name and a nanosecond timestamp to prevent collisions.
func keyringTestServiceName(t *testing.T) string {
t.Helper()
return t.Name() + "_" + fmt.Sprintf("%v", time.Now().UnixNano())
}
func TestKeyring(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
t.Skip("linux is not supported yet")
}
// This test exercises use of the operating system keyring. As a result,
// the operating system keyring is expected to be available.
const (
testURL = "http://127.0.0.1:1337"
testURL2 = "http://127.0.0.1:1338"
)
t.Run("ReadNonExistent", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
_, err = backend.Read(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist when reading non-existent token")
})
t.Run("DeleteNonExistent", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
err = backend.Delete(srvURL)
require.Error(t, err)
require.True(t, errors.Is(err, os.ErrNotExist), "expected os.ErrNotExist when deleting non-existent token")
})
t.Run("WriteAndRead", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
dir := t.TempDir()
expSessionFile := path.Join(dir, "session")
const inputToken = "test-keyring-token-12345"
err = backend.Write(srvURL, inputToken)
require.NoError(t, err)
// Verify no session file was created (the keyring backend stores the token in the OS keyring, not a file)
_, err = os.Stat(expSessionFile)
require.True(t, errors.Is(err, os.ErrNotExist), "expected session token file to not exist when using keyring")
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, inputToken, token)
// Clean up
err = backend.Delete(srvURL)
require.NoError(t, err)
})
t.Run("WriteAndDelete", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
const inputToken = "test-keyring-token-67890"
err = backend.Write(srvURL, inputToken)
require.NoError(t, err)
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, inputToken, token)
err = backend.Delete(srvURL)
require.NoError(t, err)
_, err = backend.Read(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist after deleting token")
})
t.Run("OverwriteToken", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
// Write first token
const firstToken = "first-keyring-token"
err = backend.Write(srvURL, firstToken)
require.NoError(t, err)
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, firstToken, token)
// Overwrite with second token
const secondToken = "second-keyring-token"
err = backend.Write(srvURL, secondToken)
require.NoError(t, err)
token, err = backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, secondToken, token)
// Clean up
err = backend.Delete(srvURL)
require.NoError(t, err)
})
t.Run("MultipleServers", func(t *testing.T) {
t.Parallel()
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
srvURL2, err := url.Parse(testURL2)
require.NoError(t, err)
t.Cleanup(func() {
_ = backend.Delete(srvURL)
_ = backend.Delete(srvURL2)
})
// Write token for server 1
const token1 = "token-for-server-1"
err = backend.Write(srvURL, token1)
require.NoError(t, err)
// Write token for server 2 (should NOT overwrite server 1)
const token2 = "token-for-server-2"
err = backend.Write(srvURL2, token2)
require.NoError(t, err)
// Read server 1's credential
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, token1, token)
// Read server 2's credential
token, err = backend.Read(srvURL2)
require.NoError(t, err)
require.Equal(t, token2, token)
// Delete server 1's credential
err = backend.Delete(srvURL)
require.NoError(t, err)
// Verify server 1's credential is gone
_, err = backend.Read(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err))
// Verify server 2's credential still exists
token, err = backend.Read(srvURL2)
require.NoError(t, err)
require.Equal(t, token2, token)
// Clean up remaining credentials
err = backend.Delete(srvURL2)
require.NoError(t, err)
})
t.Run("StorageFormat", func(t *testing.T) {
t.Parallel()
// The storage format must remain consistent to ensure we don't break
// compatibility with other Coder-related applications that may read
// or decode the same credential.
const testURL1 = "http://127.0.0.1:1337"
srv1URL, err := url.Parse(testURL1)
require.NoError(t, err)
const testURL2 = "http://127.0.0.1:1338"
srv2URL, err := url.Parse(testURL2)
require.NoError(t, err)
serviceName := keyringTestServiceName(t)
backend := sessionstore.NewKeyringWithService(serviceName)
t.Cleanup(func() {
_ = backend.Delete(srv1URL)
_ = backend.Delete(srv2URL)
})
// Write token for server 1
const token1 = "token-server-1"
err = backend.Write(srv1URL, token1)
require.NoError(t, err)
// Write token for server 2 (should NOT overwrite server 1's token)
const token2 = "token-server-2"
err = backend.Write(srv2URL, token2)
require.NoError(t, err)
// Verify both credentials are stored in the raw format and can
// be extracted through the Backend API.
rawCredential := readRawKeychainCredential(t, serviceName)
storedCreds := make(storedCredentials)
err = json.Unmarshal(rawCredential, &storedCreds)
require.NoError(t, err, "unmarshalling stored credentials")
// Both credentials should exist
require.Len(t, storedCreds, 2)
require.Equal(t, token1, storedCreds[srv1URL.Host].APIToken)
require.Equal(t, token2, storedCreds[srv2URL.Host].APIToken)
// Read individual credentials
token, err := backend.Read(srv1URL)
require.NoError(t, err)
require.Equal(t, token1, token)
token, err = backend.Read(srv2URL)
require.NoError(t, err)
require.Equal(t, token2, token)
// Cleanup
err = backend.Delete(srv1URL)
require.NoError(t, err)
err = backend.Delete(srv2URL)
require.NoError(t, err)
})
}
func TestFile(t *testing.T) {
const (
testURL = "http://127.0.0.1:1337"
testURL2 = "http://127.0.0.1:1338"
)
t.Parallel()
t.Run("ReadNonExistent", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
_, err = backend.Read(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err))
})
t.Run("WriteAndRead", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
// Write a token
const inputToken = "test-token-12345"
err = backend.Write(srvURL, inputToken)
require.NoError(t, err)
// Verify the session file was created
sessionFile := config.Root(dir).Session()
require.True(t, sessionFile.Exists())
// Read the token back
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, inputToken, token)
})
t.Run("WriteAndDelete", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
// Write a token
const inputToken = "test-token-67890"
err = backend.Write(srvURL, inputToken)
require.NoError(t, err)
// Verify the token was written
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, inputToken, token)
// Delete the token
err = backend.Delete(srvURL)
require.NoError(t, err)
// Verify the token is gone
_, err = backend.Read(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err))
})
t.Run("DeleteNonExistent", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
// Attempt to delete a non-existent token
err = backend.Delete(srvURL)
require.Error(t, err)
require.True(t, os.IsNotExist(err))
})
t.Run("OverwriteToken", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
// Write first token
const firstToken = "first-token"
err = backend.Write(srvURL, firstToken)
require.NoError(t, err)
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, firstToken, token)
// Overwrite with second token
const secondToken = "second-token"
err = backend.Write(srvURL, secondToken)
require.NoError(t, err)
token, err = backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, secondToken, token)
})
t.Run("WriteIgnoresURL", func(t *testing.T) {
t.Parallel()
dir := t.TempDir()
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
srvURL2, err := url.Parse(testURL2)
require.NoError(t, err)
//nolint:gosec // Write test token for the first URL
const firstToken = "token-for-url1"
err = backend.Write(srvURL, firstToken)
require.NoError(t, err)
//nolint:gosec // Write with second URL - should overwrite
const secondToken = "token-for-url2"
err = backend.Write(srvURL2, secondToken)
require.NoError(t, err)
// Should have the second token (File backend doesn't differentiate by URL)
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, secondToken, token)
})
}
-60
View File
@@ -1,60 +0,0 @@
//go:build windows
package sessionstore
import (
"errors"
"os"
"syscall"
"github.com/danieljoos/wincred"
)
// operatingSystemKeyring implements keyringProvider and uses Windows Credential Manager.
// It is largely adapted from the zalando/go-keyring package.
type operatingSystemKeyring struct{}
func (operatingSystemKeyring) Set(service, credential string) error {
// password may not exceed 2560 bytes (https://github.com/jaraco/keyring/issues/540#issuecomment-968329967)
if len(credential) > 2560 {
return ErrSetDataTooBig
}
// service may not exceed 512 bytes (might need more testing)
if len(service) >= 512 {
return ErrSetDataTooBig
}
// service may not exceed 32k but problems occur before that
// so we limit it to 30k
if len(service) > 1024*30 {
return ErrSetDataTooBig
}
cred := wincred.NewGenericCredential(service)
cred.CredentialBlob = []byte(credential)
cred.Persist = wincred.PersistLocalMachine
return cred.Write()
}
func (operatingSystemKeyring) Get(service string) ([]byte, error) {
cred, err := wincred.GetGenericCredential(service)
if err != nil {
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
return nil, os.ErrNotExist
}
return nil, err
}
return cred.CredentialBlob, nil
}
func (operatingSystemKeyring) Delete(service string) error {
cred, err := wincred.GetGenericCredential(service)
if err != nil {
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
return os.ErrNotExist
}
return err
}
return cred.Delete()
}
@@ -1,74 +0,0 @@
//go:build windows
package sessionstore_test
import (
"encoding/json"
"net/url"
"os"
"testing"
"github.com/danieljoos/wincred"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/sessionstore"
)
func readRawKeychainCredential(t *testing.T, serviceName string) []byte {
t.Helper()
winCred, err := wincred.GetGenericCredential(serviceName)
if err != nil {
t.Fatal(err)
}
return winCred.CredentialBlob
}
func TestWindowsKeyring_WriteReadDelete(t *testing.T) {
t.Parallel()
const testURL = "http://127.0.0.1:1337"
srvURL, err := url.Parse(testURL)
require.NoError(t, err)
serviceName := keyringTestServiceName(t)
backend := sessionstore.NewKeyringWithService(serviceName)
t.Cleanup(func() { _ = backend.Delete(srvURL) })
// Verify no token exists initially
_, err = backend.Read(srvURL)
require.ErrorIs(t, err, os.ErrNotExist)
// Write a token
const inputToken = "test-token-12345"
err = backend.Write(srvURL, inputToken)
require.NoError(t, err)
// Verify the credential is stored in Windows Credential Manager with correct format
winCred, err := wincred.GetGenericCredential(serviceName)
require.NoError(t, err, "getting windows credential")
storedCreds := make(storedCredentials)
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
require.NoError(t, err, "unmarshalling stored credentials")
// Verify the stored values
require.Len(t, storedCreds, 1)
cred, ok := storedCreds[srvURL.Host]
require.True(t, ok, "credential for URL should exist")
require.Equal(t, inputToken, cred.APIToken)
require.Equal(t, srvURL.Host, cred.CoderURL)
// Read the token back
token, err := backend.Read(srvURL)
require.NoError(t, err)
require.Equal(t, inputToken, token)
// Delete the token
err = backend.Delete(srvURL)
require.NoError(t, err)
// Verify token is deleted
_, err = backend.Read(srvURL)
require.ErrorIs(t, err, os.ErrNotExist)
}
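The assertions above imply that the credential blob written to Windows Credential Manager is a JSON object keyed by the deployment host. A rough reconstruction of that shape follows; the struct, field, and JSON tag names are assumptions, since the real storedCredentials type is defined elsewhere in the package.
// Hypothetical reconstruction; names and tags are guesses based on the test above.
type storedCredential struct {
	CoderURL string `json:"coder_url"` // srvURL.Host in the test
	APIToken string `json:"api_token"`
}

// Keyed by server URL host.
type storedCredentials map[string]storedCredential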
-35
View File
@@ -1,35 +0,0 @@
package cli
import (
"github.com/coder/serpent"
)
func (r *RootCmd) syncCommand() *serpent.Command {
var socketPath string
cmd := &serpent.Command{
Use: "sync",
Short: "Manage unit dependencies for coordinated startup",
Long: "Commands for orchestrating unit startup order in workspaces. Units are most commonly coder scripts. Use these commands to declare dependencies between units, coordinate their startup sequence, and ensure units start only after their dependencies are ready. This helps prevent race conditions and startup failures.",
Handler: func(i *serpent.Invocation) error {
return i.Command.HelpHandler(i)
},
Children: []*serpent.Command{
r.syncPing(&socketPath),
r.syncStart(&socketPath),
r.syncWant(&socketPath),
r.syncComplete(&socketPath),
r.syncStatus(&socketPath),
},
Options: serpent.OptionSet{
{
Flag: "socket-path",
Env: "CODER_AGENT_SOCKET_PATH",
Description: "Specify the path for the agent socket.",
Value: serpent.StringOf(&socketPath),
},
},
}
return cmd
}
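Each subcommand below is a thin wrapper around the agentsocket client (SyncWant, SyncStart, SyncComplete, SyncReady), as the handlers and tests in this diff show. The following is a minimal programmatic sketch of the same orchestration; the socket path and unit names are illustrative only.
package main

import (
	"context"
	"log"

	"github.com/coder/coder/v2/agent/agentsocket"
	"github.com/coder/coder/v2/agent/unit"
)

func main() {
	ctx := context.Background()

	// Illustrative socket path; the CLI only overrides the agent default when
	// --socket-path / CODER_AGENT_SOCKET_PATH is set.
	client, err := agentsocket.NewClient(ctx, agentsocket.WithPath("/tmp/coder-agent.sock"))
	if err != nil {
		log.Fatalf("connect to agent socket: %v", err)
	}
	defer client.Close()

	// "app" may not start until "db-migrate" signals completion
	// (equivalent to: coder exp sync want app db-migrate).
	if err := client.SyncWant(ctx, unit.ID("app"), unit.ID("db-migrate")); err != nil {
		log.Fatalf("declare dependency: %v", err)
	}

	// Run the dependency to completion
	// (equivalent to: coder exp sync start/complete db-migrate).
	if err := client.SyncStart(ctx, unit.ID("db-migrate")); err != nil {
		log.Fatalf("start dependency: %v", err)
	}
	if err := client.SyncComplete(ctx, unit.ID("db-migrate")); err != nil {
		log.Fatalf("complete dependency: %v", err)
	}

	// "app" should now be ready, which is what `coder exp sync start app` polls for.
	ready, err := client.SyncReady(ctx, unit.ID("app"))
	if err != nil {
		log.Fatalf("check readiness: %v", err)
	}
	log.Printf("app ready: %v", ready)
}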
-47
View File
@@ -1,47 +0,0 @@
package cli
import (
"golang.org/x/xerrors"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/serpent"
)
func (*RootCmd) syncComplete(socketPath *string) *serpent.Command {
cmd := &serpent.Command{
Use: "complete <unit>",
Short: "Mark a unit as complete",
Long: "Mark a unit as complete. Indicating to other units that it has completed its work. This allows units that depend on it to proceed with their startup.",
Handler: func(i *serpent.Invocation) error {
ctx := i.Context()
if len(i.Args) != 1 {
return xerrors.New("exactly one unit name is required")
}
unitID := unit.ID(i.Args[0])
opts := []agentsocket.Option{}
if *socketPath != "" {
opts = append(opts, agentsocket.WithPath(*socketPath))
}
client, err := agentsocket.NewClient(ctx, opts...)
if err != nil {
return xerrors.Errorf("connect to agent socket: %w", err)
}
defer client.Close()
if err := client.SyncComplete(ctx, unitID); err != nil {
return xerrors.Errorf("complete unit failed: %w", err)
}
cliui.Info(i.Stdout, "Success")
return nil
},
}
return cmd
}
-42
View File
@@ -1,42 +0,0 @@
package cli
import (
"golang.org/x/xerrors"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/serpent"
)
func (*RootCmd) syncPing(socketPath *string) *serpent.Command {
cmd := &serpent.Command{
Use: "ping",
Short: "Test agent socket connectivity and health",
Long: "Test connectivity to the local Coder agent socket to verify the agent is running and responsive. Useful for troubleshooting startup issues or verifying the agent is accessible before running other sync commands.",
Handler: func(i *serpent.Invocation) error {
ctx := i.Context()
opts := []agentsocket.Option{}
if *socketPath != "" {
opts = append(opts, agentsocket.WithPath(*socketPath))
}
client, err := agentsocket.NewClient(ctx, opts...)
if err != nil {
return xerrors.Errorf("connect to agent socket: %w", err)
}
defer client.Close()
err = client.Ping(ctx)
if err != nil {
return xerrors.Errorf("ping failed: %w", err)
}
cliui.Info(i.Stdout, "Success")
return nil
},
}
return cmd
}
-101
View File
@@ -1,101 +0,0 @@
package cli
import (
"context"
"time"
"golang.org/x/xerrors"
"github.com/coder/serpent"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/cli/cliui"
)
const (
syncPollInterval = 1 * time.Second
)
func (*RootCmd) syncStart(socketPath *string) *serpent.Command {
var timeout time.Duration
cmd := &serpent.Command{
Use: "start <unit>",
Short: "Wait until all unit dependencies are satisfied",
Long: "Wait until all dependencies are satisfied, consider the unit to have started, then allow it to proceed. This command polls until dependencies are ready, then marks the unit as started.",
Handler: func(i *serpent.Invocation) error {
ctx := i.Context()
if len(i.Args) != 1 {
return xerrors.New("exactly one unit name is required")
}
unitName := unit.ID(i.Args[0])
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
opts := []agentsocket.Option{}
if *socketPath != "" {
opts = append(opts, agentsocket.WithPath(*socketPath))
}
client, err := agentsocket.NewClient(ctx, opts...)
if err != nil {
return xerrors.Errorf("connect to agent socket: %w", err)
}
defer client.Close()
ready, err := client.SyncReady(ctx, unitName)
if err != nil {
return xerrors.Errorf("error checking dependencies: %w", err)
}
if !ready {
cliui.Infof(i.Stdout, "Waiting for dependencies of unit '%s' to be satisfied...", unitName)
ticker := time.NewTicker(syncPollInterval)
defer ticker.Stop()
pollLoop:
for {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
return xerrors.Errorf("timeout waiting for dependencies of unit '%s'", unitName)
}
return ctx.Err()
case <-ticker.C:
ready, err := client.SyncReady(ctx, unitName)
if err != nil {
return xerrors.Errorf("error checking dependencies: %w", err)
}
if ready {
break pollLoop
}
}
}
}
if err := client.SyncStart(ctx, unitName); err != nil {
return xerrors.Errorf("start unit failed: %w", err)
}
cliui.Info(i.Stdout, "Success")
return nil
},
}
cmd.Options = append(cmd.Options, serpent.Option{
Flag: "timeout",
Description: "Maximum time to wait for dependencies (e.g., 30s, 5m). 5m by default.",
Value: serpent.DurationOf(&timeout),
Default: "5m",
})
return cmd
}
-88
View File
@@ -1,88 +0,0 @@
package cli
import (
"fmt"
"golang.org/x/xerrors"
"github.com/coder/serpent"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/cli/cliui"
)
func (*RootCmd) syncStatus(socketPath *string) *serpent.Command {
formatter := cliui.NewOutputFormatter(
cliui.ChangeFormatterData(
cliui.TableFormat(
[]agentsocket.DependencyInfo{},
[]string{
"depends on",
"required status",
"current status",
"satisfied",
},
),
func(data any) (any, error) {
resp, ok := data.(agentsocket.SyncStatusResponse)
if !ok {
return nil, xerrors.Errorf("expected agentsocket.SyncStatusResponse, got %T", data)
}
return resp.Dependencies, nil
}),
cliui.JSONFormat(),
)
cmd := &serpent.Command{
Use: "status <unit>",
Short: "Show unit status and dependency state",
Long: "Show the current status of a unit, whether it is ready to start, and lists its dependencies. Shows which dependencies are satisfied and which are still pending. Supports multiple output formats.",
Handler: func(i *serpent.Invocation) error {
ctx := i.Context()
if len(i.Args) != 1 {
return xerrors.New("exactly one unit name is required")
}
unitID := unit.ID(i.Args[0])
opts := []agentsocket.Option{}
if *socketPath != "" {
opts = append(opts, agentsocket.WithPath(*socketPath))
}
client, err := agentsocket.NewClient(ctx, opts...)
if err != nil {
return xerrors.Errorf("connect to agent socket: %w", err)
}
defer client.Close()
statusResp, err := client.SyncStatus(ctx, unitID)
if err != nil {
return xerrors.Errorf("get status failed: %w", err)
}
var out string
header := fmt.Sprintf("Unit: %s\nStatus: %s\nReady: %t\n\nDependencies:\n", unitID, statusResp.Status, statusResp.IsReady)
if formatter.FormatID() == "table" && len(statusResp.Dependencies) == 0 {
out = header + "No dependencies found"
} else {
out, err = formatter.Format(ctx, statusResp)
if err != nil {
return xerrors.Errorf("format status: %w", err)
}
if formatter.FormatID() == "table" {
out = header + out
}
}
_, _ = fmt.Fprintln(i.Stdout, out)
return nil
},
}
formatter.AttachOptions(&cmd.Options)
return cmd
}
-330
View File
@@ -1,330 +0,0 @@
//go:build !windows

package cli_test
import (
"bytes"
"context"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/testutil"
)
// setupSocketServer creates an agentsocket server at a temporary path for testing.
// Returns the socket path and a cleanup function. The path should be passed to
// sync commands via the --socket-path flag.
func setupSocketServer(t *testing.T) (path string, cleanup func()) {
t.Helper()
// Use a temporary socket path for each test
socketPath := filepath.Join(tempDirUnixSocket(t), "test.sock")
// Create parent directory if needed
parentDir := filepath.Dir(socketPath)
err := os.MkdirAll(parentDir, 0o700)
require.NoError(t, err, "create socket directory")
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
agentsocket.WithPath(socketPath),
)
require.NoError(t, err, "create socket server")
// Return cleanup function
return socketPath, func() {
err := server.Close()
require.NoError(t, err, "close socket server")
_ = os.Remove(socketPath)
}
}
func TestSyncCommands_Golden(t *testing.T) {
t.Parallel()
t.Run("ping", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "ping", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/ping_success", outBuf.Bytes(), nil)
})
t.Run("start_no_dependencies", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "start", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/start_no_dependencies", outBuf.Bytes(), nil)
})
t.Run("start_with_dependencies", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Set up dependency: test-unit depends on dep-unit
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
// Declare dependency
err = client.SyncWant(ctx, "test-unit", "dep-unit")
require.NoError(t, err)
client.Close()
// Start a goroutine to complete the dependency after a short delay
// This simulates the dependency being satisfied while start is waiting
// The delay ensures the "Waiting..." message appears in the output
done := make(chan error, 1)
go func() {
// Wait a moment to let the start command begin waiting and print the message
time.Sleep(100 * time.Millisecond)
compCtx := context.Background()
compClient, err := agentsocket.NewClient(compCtx, agentsocket.WithPath(path))
if err != nil {
done <- err
return
}
defer compClient.Close()
// Start and complete the dependency unit
err = compClient.SyncStart(compCtx, "dep-unit")
if err != nil {
done <- err
return
}
err = compClient.SyncComplete(compCtx, "dep-unit")
done <- err
}()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "start", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
// Run the start command - it should wait for the dependency
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
// Ensure the completion goroutine finished
select {
case err := <-done:
require.NoError(t, err, "complete dependency")
case <-time.After(time.Second):
// Goroutine should have finished by now
}
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/start_with_dependencies", outBuf.Bytes(), nil)
})
t.Run("want", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "want", "test-unit", "dep-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/want_success", outBuf.Bytes(), nil)
})
t.Run("complete", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// First start the unit
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "complete", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/complete_success", outBuf.Bytes(), nil)
})
t.Run("status_pending", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Set up a unit with unsatisfied dependency
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncWant(ctx, "test-unit", "dep-unit")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "status", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/status_pending", outBuf.Bytes(), nil)
})
t.Run("status_started", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Start a unit
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "status", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/status_started", outBuf.Bytes(), nil)
})
t.Run("status_completed", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Start and complete a unit
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncStart(ctx, "test-unit")
require.NoError(t, err)
err = client.SyncComplete(ctx, "test-unit")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "status", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/status_completed", outBuf.Bytes(), nil)
})
t.Run("status_with_dependencies", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Set up a unit with dependencies, some satisfied, some not
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncWant(ctx, "test-unit", "dep-1")
require.NoError(t, err)
err = client.SyncWant(ctx, "test-unit", "dep-2")
require.NoError(t, err)
// Complete dep-1, leave dep-2 incomplete
err = client.SyncStart(ctx, "dep-1")
require.NoError(t, err)
err = client.SyncComplete(ctx, "dep-1")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "status", "test-unit", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/status_with_dependencies", outBuf.Bytes(), nil)
})
t.Run("status_json_format", func(t *testing.T) {
t.Parallel()
path, cleanup := setupSocketServer(t)
defer cleanup()
ctx := testutil.Context(t, testutil.WaitShort)
// Set up a unit with dependencies
client, err := agentsocket.NewClient(ctx, agentsocket.WithPath(path))
require.NoError(t, err)
err = client.SyncWant(ctx, "test-unit", "dep-unit")
require.NoError(t, err)
err = client.SyncStart(ctx, "dep-unit")
require.NoError(t, err)
err = client.SyncComplete(ctx, "dep-unit")
require.NoError(t, err)
client.Close()
var outBuf bytes.Buffer
inv, _ := clitest.New(t, "exp", "sync", "status", "test-unit", "--output", "json", "--socket-path", path)
inv.Stdout = &outBuf
inv.Stderr = &outBuf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
clitest.TestGoldenFile(t, "TestSyncCommands_Golden/status_json_format", outBuf.Bytes(), nil)
})
}
-49
View File
@@ -1,49 +0,0 @@
package cli
import (
"golang.org/x/xerrors"
"github.com/coder/serpent"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/unit"
"github.com/coder/coder/v2/cli/cliui"
)
func (*RootCmd) syncWant(socketPath *string) *serpent.Command {
cmd := &serpent.Command{
Use: "want <unit> <depends-on>",
Short: "Declare that a unit depends on another unit completing before it can start",
Long: "Declare that a unit depends on another unit completing before it can start. The unit specified first will not start until the second has signaled that it has completed.",
Handler: func(i *serpent.Invocation) error {
ctx := i.Context()
if len(i.Args) != 2 {
return xerrors.New("exactly two arguments are required: unit and depends-on")
}
dependentUnit := unit.ID(i.Args[0])
dependsOn := unit.ID(i.Args[1])
opts := []agentsocket.Option{}
if *socketPath != "" {
opts = append(opts, agentsocket.WithPath(*socketPath))
}
client, err := agentsocket.NewClient(ctx, opts...)
if err != nil {
return xerrors.Errorf("connect to agent socket: %w", err)
}
defer client.Close()
if err := client.SyncWant(ctx, dependentUnit, dependsOn); err != nil {
return xerrors.Errorf("declare dependency failed: %w", err)
}
cliui.Info(i.Stdout, "Success")
return nil
},
}
return cmd
}
@@ -1 +0,0 @@
Success
@@ -1 +0,0 @@
Success
@@ -1 +0,0 @@
Success
@@ -1,2 +0,0 @@
Waiting for dependencies of unit 'test-unit' to be satisfied...
Success
@@ -1,6 +0,0 @@
Unit: test-unit
Status: completed
Ready: true
Dependencies:
No dependencies found
@@ -1,13 +0,0 @@
{
"unit_name": "test-unit",
"status": "pending",
"is_ready": true,
"dependencies": [
{
"depends_on": "dep-unit",
"required_status": "completed",
"current_status": "completed",
"is_satisfied": true
}
]
}
@@ -1,7 +0,0 @@
Unit: test-unit
Status: pending
Ready: false
Dependencies:
DEPENDS ON REQUIRED STATUS CURRENT STATUS SATISFIED
dep-unit completed not registered false
@@ -1,6 +0,0 @@
Unit: test-unit
Status: started
Ready: true
Dependencies:
No dependencies found

Some files were not shown because too many files have changed in this diff.