Compare commits

2 Commits

Author SHA1 Message Date
Thomas Kosiewski 8517ca1720 refactor(toolsdk): reuse agent resolution for WorkspaceBash 2026-02-04 15:45:00 +00:00
Thomas Kosiewski 2e789b06e7 fix(toolsdk): block WorkspaceBash on Windows agents 2026-02-04 15:06:05 +00:00
581 changed files with 11333 additions and 24811 deletions
-4
@@ -1,4 +0,0 @@
# All artifacts of the build processed are dumped here.
# Ignore it for docker context, as all Dockerfiles should build their own
# binaries.
build
-18
@@ -1,18 +0,0 @@
name: "Setup GNU tools (macOS)"
description: |
Installs GNU versions of bash, getopt, and make on macOS runners.
Required because lib.sh needs bash 4+, GNU getopt, and make 4+.
This is a no-op on non-macOS runners.
runs:
using: "composite"
steps:
- name: Setup GNU tools (macOS)
if: runner.os == 'macOS'
shell: bash
run: |
brew install bash gnu-getopt make
{
echo "$(brew --prefix bash)/bin"
echo "$(brew --prefix gnu-getopt)/bin"
echo "$(brew --prefix make)/libexec/gnubin"
} >> "$GITHUB_PATH"
+1 -1
@@ -4,7 +4,7 @@ description: |
inputs:
version:
description: "The Go version to use."
default: "1.25.7"
default: "1.25.6"
use-preinstalled-go:
description: "Whether to use preinstalled Go."
default: "false"
+1 -1
@@ -7,5 +7,5 @@ runs:
- name: Install Terraform
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: 1.14.5
terraform_version: 1.14.1
terraform_wrapper: false
+41 -36
@@ -35,7 +35,7 @@ jobs:
tailnet-integration: ${{ steps.filter.outputs.tailnet-integration }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -157,7 +157,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -181,7 +181,7 @@ jobs:
echo "LINT_CACHE_DIR=$dir" >> "$GITHUB_ENV"
- name: golangci-lint cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
with:
path: |
${{ env.LINT_CACHE_DIR }}
@@ -241,13 +241,11 @@ jobs:
lint-actions:
needs: changes
# Only run this job if changes to CI workflow files are detected. This job
# can flake as it reaches out to GitHub to check referenced actions.
if: needs.changes.outputs.ci == 'true'
if: needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -272,7 +270,7 @@ jobs:
if: ${{ !cancelled() }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -329,7 +327,7 @@ jobs:
timeout-minutes: 20
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -381,7 +379,7 @@ jobs:
- windows-2022
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -416,8 +414,17 @@ jobs:
id: go-paths
uses: ./.github/actions/setup-go-paths
# macOS default bash and coreutils are too old for our scripts
# (lib.sh requires bash 4+, GNU getopt, make 4+).
- name: Setup GNU tools (macOS)
uses: ./.github/actions/setup-gnu-tools
if: runner.os == 'macOS'
run: |
brew install bash gnu-getopt make
{
echo "$(brew --prefix bash)/bin"
echo "$(brew --prefix gnu-getopt)/bin"
echo "$(brew --prefix make)/libexec/gnubin"
} >> "$GITHUB_PATH"
- name: Setup Go
uses: ./.github/actions/setup-go
@@ -489,14 +496,6 @@ jobs:
# macOS will output "The default interactive shell is now zsh" intermittently in CI.
touch ~/.bash_profile && echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bash_profile
- name: Increase PTY limit (macOS)
if: runner.os == 'macOS'
shell: bash
run: |
# Increase PTY limit to avoid exhaustion during tests.
# Default is 511; 999 is the maximum value on CI runner.
sudo sysctl -w kern.tty.ptmx_max=999
- name: Test with PostgreSQL Database (Linux)
if: runner.os == 'Linux'
uses: ./.github/actions/test-go-pg
@@ -586,7 +585,7 @@ jobs:
timeout-minutes: 25
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -648,7 +647,7 @@ jobs:
timeout-minutes: 25
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -720,7 +719,7 @@ jobs:
timeout-minutes: 20
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -747,7 +746,7 @@ jobs:
timeout-minutes: 20
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -780,7 +779,7 @@ jobs:
name: ${{ matrix.variant.name }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -860,7 +859,7 @@ jobs:
if: needs.changes.outputs.site == 'true' || needs.changes.outputs.ci == 'true'
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -941,7 +940,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -1013,7 +1012,7 @@ jobs:
if: always()
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -1057,8 +1056,14 @@ jobs:
fetch-depth: 0
persist-credentials: false
- name: Setup GNU tools (macOS)
uses: ./.github/actions/setup-gnu-tools
- name: Setup build tools
run: |
brew install bash gnu-getopt make
{
echo "$(brew --prefix bash)/bin"
echo "$(brew --prefix gnu-getopt)/bin"
echo "$(brew --prefix make)/libexec/gnubin"
} >> "$GITHUB_PATH"
- name: Switch XCode Version
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
@@ -1128,7 +1133,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -1183,7 +1188,7 @@ jobs:
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -1194,7 +1199,7 @@ jobs:
persist-credentials: false
- name: GHCR Login
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -1401,7 +1406,7 @@ jobs:
id: attest_main
if: github.ref == 'refs/heads/main'
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: "ghcr.io/coder/coder-preview:main"
predicate-type: "https://slsa.dev/provenance/v1"
@@ -1438,7 +1443,7 @@ jobs:
id: attest_latest
if: github.ref == 'refs/heads/main'
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: "ghcr.io/coder/coder-preview:latest"
predicate-type: "https://slsa.dev/provenance/v1"
@@ -1475,7 +1480,7 @@ jobs:
id: attest_version
if: github.ref == 'refs/heads/main'
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: "ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}"
predicate-type: "https://slsa.dev/provenance/v1"
@@ -1580,7 +1585,7 @@ jobs:
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+19 -1
@@ -6,7 +6,9 @@
# native suggestion syntax, allowing one-click commits of suggested changes.
#
# Triggers:
# - Label "code-review" added: Run review on demand
# - New PR opened: Initial code review
# - Label "code-review" added: Re-run review on demand
# - PR marked ready for review: Review when draft is promoted
# - Workflow dispatch: Manual run with PR URL
#
# Note: This workflow requires access to secrets and will be skipped for:
@@ -18,7 +20,9 @@ name: AI Code Review
on:
pull_request:
types:
- opened
- labeled
- ready_for_review
workflow_dispatch:
inputs:
pr_url:
@@ -40,7 +44,9 @@ jobs:
cancel-in-progress: true
if: |
(
github.event.action == 'opened' ||
github.event.label.name == 'code-review' ||
github.event.action == 'ready_for_review' ||
github.event_name == 'workflow_dispatch'
) &&
(github.event.pull_request.draft == false || github.event_name == 'workflow_dispatch')
@@ -121,9 +127,15 @@ jobs:
# Set trigger type based on action
case "${GITHUB_EVENT_ACTION}" in
opened)
echo "trigger_type=new_pr" >> "${GITHUB_OUTPUT}"
;;
labeled)
echo "trigger_type=label_requested" >> "${GITHUB_OUTPUT}"
;;
ready_for_review)
echo "trigger_type=ready_for_review" >> "${GITHUB_OUTPUT}"
;;
*)
echo "trigger_type=unknown" >> "${GITHUB_OUTPUT}"
;;
@@ -145,9 +157,15 @@ jobs:
# Build context based on trigger type
case "${TRIGGER_TYPE}" in
new_pr)
CONTEXT="This is a NEW PR. Perform a thorough code review."
;;
label_requested)
CONTEXT="A code review was REQUESTED via label. Perform a thorough code review."
;;
ready_for_review)
CONTEXT="This PR was marked READY FOR REVIEW. Perform a thorough code review."
;;
manual)
CONTEXT="This is a MANUAL review request. Perform a thorough code review."
;;
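For readers tracing the workflow logic, here is a minimal Go translation of the trigger mapping in the case statement above (illustrative only; the workflow itself does this in bash, and the `triggerType` function name is hypothetical):

```go
package main

import "fmt"

// triggerType mirrors the workflow's case statement: each pull_request
// action that can fire the job maps to a trigger type, which in turn
// selects the review prompt context.
func triggerType(action string) string {
	switch action {
	case "opened":
		return "new_pr"
	case "labeled":
		return "label_requested"
	case "ready_for_review":
		return "ready_for_review"
	default:
		return "unknown"
	}
}

func main() {
	for _, action := range []string{"opened", "labeled", "ready_for_review", "closed"} {
		fmt.Printf("%s -> %s\n", action, triggerType(action))
	}
}
```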
+4 -4
@@ -36,7 +36,7 @@ jobs:
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -65,7 +65,7 @@ jobs:
packages: write # to retag image as dogfood
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -76,7 +76,7 @@ jobs:
persist-credentials: false
- name: GHCR Login
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -146,7 +146,7 @@ jobs:
needs: deploy
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+14 -24
@@ -160,41 +160,34 @@ jobs:
# Build context based on trigger type
case "${TRIGGER_TYPE}" in
new_pr)
CONTEXT="This is a NEW PR. Perform initial documentation review."
CONTEXT="This is a NEW PR. Perform a thorough documentation review."
;;
pr_updated)
CONTEXT="This PR was UPDATED with new commits. Check if previous feedback was addressed or if new doc needs arose."
CONTEXT="This PR was UPDATED with new commits. Only comment if the changes affect documentation needs or address previous feedback."
;;
label_requested)
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough review."
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough documentation review."
;;
ready_for_review)
CONTEXT="This PR was marked READY FOR REVIEW. Perform a thorough review."
CONTEXT="This PR was marked READY FOR REVIEW (converted from draft). Perform a thorough documentation review."
;;
manual)
CONTEXT="This is a MANUAL review request. Perform a thorough review."
CONTEXT="This is a MANUAL review request. Perform a thorough documentation review."
;;
*)
CONTEXT="Perform a documentation review."
CONTEXT="Perform a thorough documentation review."
;;
esac
# Build task prompt with sticky comment logic
# Build task prompt with PR-specific context
TASK_PROMPT="Use the doc-check skill to review PR #${PR_NUMBER} in coder/coder.
${CONTEXT}
Use \`gh\` to get PR details, diff, and all comments. Look for an existing doc-check comment containing \`<!-- doc-check-sticky -->\` - if one exists, you'll update it instead of creating a new one.
Use \`gh\` to get PR details, diff, and all comments. Check for previous doc-check comments (from coder-doc-check) and only post a new comment if it adds value.
**Do not comment if no documentation changes are needed.**
If a sticky comment already exists, compare your current findings against it:
- Check off \`[x]\` items that are now addressed
- Strikethrough items no longer needed (e.g., code was reverted)
- Add new unchecked \`[ ]\` items for newly discovered needs
- If an item is checked but you can't verify the docs were added, add a warning note below it
- If nothing meaningful changed, don't update the comment at all
## Comment format
Use this structure (only include relevant sections):
@@ -202,21 +195,18 @@ jobs:
\`\`\`
## Documentation Check
### Previous Feedback
[For re-reviews only: Addressed | Partially addressed | Not yet addressed]
### Updates Needed
- [ ] \`docs/path/file.md\` - What needs to change
- [x] \`docs/other/file.md\` - This was addressed
- ~~\`docs/removed.md\` - No longer needed~~ *(reverted in abc123)*
- [ ] \`docs/path/file.md\` - [what needs to change]
### New Documentation Needed
- [ ] \`docs/suggested/path.md\` - What should be documented
> ⚠️ *Checked but no corresponding documentation changes found in this PR*
- [ ] \`docs/suggested/path.md\` - [what should be documented]
---
*Automated review via [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
<!-- doc-check-sticky -->
\`\`\`
The \`<!-- doc-check-sticky -->\` marker must be at the end so future runs can find and update this comment."
\`\`\`"
# Output the prompt
{
+4 -4
@@ -38,7 +38,7 @@ jobs:
if: github.repository_owner == 'coder'
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -48,7 +48,7 @@ jobs:
persist-credentials: false
- name: Docker login
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -58,11 +58,11 @@ jobs:
run: mkdir base-build-context
- name: Install depot.dev CLI
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
# This uses OIDC authentication, so no auth variables are required.
- name: Build base Docker image via depot.dev
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
with:
project: wl5hnrrkns
context: base-build-context
+6 -6
@@ -26,7 +26,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -42,7 +42,7 @@ jobs:
# on version 2.29 and above.
nix_version: "2.28.5"
- uses: nix-community/cache-nix-action@7df957e333c1e5da7721f60227dbba6d06080569 # v7.0.2
- uses: nix-community/cache-nix-action@106bba72ed8e29c8357661199511ef07790175e9 # v7.0.1
with:
# restore and save a cache using this key
primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
@@ -75,20 +75,20 @@ jobs:
BRANCH_NAME: ${{ steps.branch-name.outputs.current_branch }}
- name: Set up Depot CLI
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- name: Login to DockerHub
if: github.ref == 'refs/heads/main'
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and push Non-Nix image
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
with:
project: b4q6ltmpzh
token: ${{ secrets.DEPOT_TOKEN }}
@@ -125,7 +125,7 @@ jobs:
id-token: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+1 -4
@@ -28,7 +28,7 @@ jobs:
- windows-2022
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -59,9 +59,6 @@ jobs:
fetch-depth: 1
persist-credentials: false
- name: Setup GNU tools (macOS)
uses: ./.github/actions/setup-gnu-tools
- name: Setup Go
uses: ./.github/actions/setup-go
with:
+1 -1
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+1 -1
@@ -19,7 +19,7 @@ jobs:
packages: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+6 -6
@@ -39,7 +39,7 @@ jobs:
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -76,7 +76,7 @@ jobs:
runs-on: "ubuntu-latest"
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -184,7 +184,7 @@ jobs:
pull-requests: write # needed for commenting on PRs
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -228,7 +228,7 @@ jobs:
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -248,7 +248,7 @@ jobs:
uses: ./.github/actions/setup-sqlc
- name: GHCR Login
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -288,7 +288,7 @@ jobs:
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+1 -1
@@ -14,7 +14,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+18 -12
@@ -78,8 +78,14 @@ jobs:
- name: Fetch git tags
run: git fetch --tags --force
- name: Setup GNU tools (macOS)
uses: ./.github/actions/setup-gnu-tools
- name: Setup build tools
run: |
brew install bash gnu-getopt make
{
echo "$(brew --prefix bash)/bin"
echo "$(brew --prefix gnu-getopt)/bin"
echo "$(brew --prefix make)/libexec/gnubin"
} >> "$GITHUB_PATH"
- name: Switch XCode Version
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
@@ -158,7 +164,7 @@ jobs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -233,7 +239,7 @@ jobs:
cat "$CODER_RELEASE_NOTES_FILE"
- name: Docker Login
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -386,12 +392,12 @@ jobs:
- name: Install depot.dev CLI
if: steps.image-base-tag.outputs.tag != ''
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
# This uses OIDC authentication, so no auth variables are required.
- name: Build base Docker image via depot.dev
if: steps.image-base-tag.outputs.tag != ''
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
with:
project: wl5hnrrkns
context: base-build-context
@@ -448,7 +454,7 @@ jobs:
id: attest_base
if: ${{ !inputs.dry_run && steps.image-base-tag.outputs.tag != '' }}
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: ${{ steps.image-base-tag.outputs.tag }}
predicate-type: "https://slsa.dev/provenance/v1"
@@ -564,7 +570,7 @@ jobs:
id: attest_main
if: ${{ !inputs.dry_run }}
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: ${{ steps.build_docker.outputs.multiarch_image }}
predicate-type: "https://slsa.dev/provenance/v1"
@@ -608,7 +614,7 @@ jobs:
id: attest_latest
if: ${{ !inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true' }}
continue-on-error: true
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
with:
subject-name: ${{ steps.latest_tag.outputs.tag }}
predicate-type: "https://slsa.dev/provenance/v1"
@@ -796,7 +802,7 @@ jobs:
# TODO: skip this if it's not a new release (i.e. a backport). This is
# fine right now because it just makes a PR that we can close.
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -872,7 +878,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -965,7 +971,7 @@ jobs:
if: ${{ !inputs.dry_run }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+1 -1
@@ -20,7 +20,7 @@ jobs:
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+3 -3
@@ -27,7 +27,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -69,7 +69,7 @@ jobs:
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -146,7 +146,7 @@ jobs:
echo "image=$(cat "$image_job")" >> "$GITHUB_OUTPUT"
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@c1824fd6edce30d7ab345a9989de00bbd46ef284 # v0.34.0
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: ${{ steps.build.outputs.image }}
format: sarif
+3 -3
@@ -18,7 +18,7 @@ jobs:
pull-requests: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -96,7 +96,7 @@ jobs:
contents: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
@@ -120,7 +120,7 @@ jobs:
actions: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
+1 -1
@@ -21,7 +21,7 @@ jobs:
pull-requests: write # required to post PR review comments by the action
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
with:
egress-policy: audit
-3
@@ -98,6 +98,3 @@ AGENTS.local.md
# Ignore plans written by AI agents.
PLAN.md
# Ignore any dev licenses
license.txt
+1 -5
@@ -909,10 +909,7 @@ site/src/api/countriesGenerated.ts: site/node_modules/.installed scripts/typegen
(cd site/ && pnpm exec biome format --write src/api/countriesGenerated.ts)
touch "$@"
scripts/metricsdocgen/generated_metrics: $(GO_SRC_FILES)
go run ./scripts/metricsdocgen/scanner > $@
docs/admin/integrations/prometheus.md: node_modules/.installed scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics scripts/metricsdocgen/generated_metrics
docs/admin/integrations/prometheus.md: node_modules/.installed scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics
go run scripts/metricsdocgen/main.go
pnpm exec markdownlint-cli2 --fix ./docs/admin/integrations/prometheus.md
pnpm exec markdown-table-formatter ./docs/admin/integrations/prometheus.md
@@ -941,7 +938,6 @@ coderd/apidoc/.gen: \
coderd/rbac/object_gen.go \
.swaggo \
scripts/apidocgen/generate.sh \
scripts/apidocgen/swaginit/main.go \
$(wildcard scripts/apidocgen/postprocess/*) \
$(wildcard scripts/apidocgen/markdown-template/*)
./scripts/apidocgen/generate.sh
+2 -10
@@ -111,12 +111,6 @@ type Client interface {
ConnectRPC28(ctx context.Context) (
proto.DRPCAgentClient28, tailnetproto.DRPCTailnetClient28, error,
)
// ConnectRPC28WithRole is like ConnectRPC28 but sends an explicit
// role query parameter to the server. The workspace agent should
// use role "agent" to enable connection monitoring.
ConnectRPC28WithRole(ctx context.Context, role string) (
proto.DRPCAgentClient28, tailnetproto.DRPCTailnetClient28, error,
)
tailnet.DERPMapRewriter
agentsdk.RefreshableSessionTokenProvider
}
@@ -1003,10 +997,8 @@ func (a *agent) run() (retErr error) {
return xerrors.Errorf("refresh token: %w", err)
}
// ConnectRPC returns the dRPC connection we use for the Agent and Tailnet v2+ APIs.
// We pass role "agent" to enable connection monitoring on the server, which tracks
// the agent's connectivity state (first_connected_at, last_connected_at, disconnected_at).
aAPI, tAPI, err := a.client.ConnectRPC28WithRole(a.hardCtx, "agent")
// ConnectRPC returns the dRPC connection we use for the Agent and Tailnet v2+ APIs
aAPI, tAPI, err := a.client.ConnectRPC28(a.hardCtx)
if err != nil {
return err
}
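The removed `ConnectRPC28WithRole` comment describes sending an explicit role query parameter so the server can enable connection monitoring. As a hedged sketch only (the actual coder/coder dial path is not shown in this diff; `withRole` and the endpoint URL are hypothetical), attaching such a parameter in Go could look like:

```go
package main

import (
	"fmt"
	"net/url"
)

// withRole is a hypothetical helper illustrating the removed
// ConnectRPC28WithRole behavior: append an explicit role query
// parameter (e.g. "agent") to the RPC endpoint URL so the server
// could enable connection monitoring for workspace agents.
func withRole(rpcURL, role string) (string, error) {
	u, err := url.Parse(rpcURL)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("role", role)
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	endpoint, err := withRole("https://coder.example.com/api/v2/workspaceagents/me/rpc", "agent")
	if err != nil {
		panic(err)
	}
	fmt.Println(endpoint) // ...rpc?role=agent
}
```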
+2 -71
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI,SubAgentClient)
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI)
//
// Generated by this command:
//
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
//
// Package acmock is a generated GoMock package.
@@ -15,7 +15,6 @@ import (
agentcontainers "github.com/coder/coder/v2/agent/agentcontainers"
codersdk "github.com/coder/coder/v2/codersdk"
uuid "github.com/google/uuid"
gomock "go.uber.org/mock/gomock"
)
@@ -217,71 +216,3 @@ func (mr *MockDevcontainerCLIMockRecorder) Up(ctx, workspaceFolder, configPath a
varargs := append([]any{ctx, workspaceFolder, configPath}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockDevcontainerCLI)(nil).Up), varargs...)
}
// MockSubAgentClient is a mock of SubAgentClient interface.
type MockSubAgentClient struct {
ctrl *gomock.Controller
recorder *MockSubAgentClientMockRecorder
isgomock struct{}
}
// MockSubAgentClientMockRecorder is the mock recorder for MockSubAgentClient.
type MockSubAgentClientMockRecorder struct {
mock *MockSubAgentClient
}
// NewMockSubAgentClient creates a new mock instance.
func NewMockSubAgentClient(ctrl *gomock.Controller) *MockSubAgentClient {
mock := &MockSubAgentClient{ctrl: ctrl}
mock.recorder = &MockSubAgentClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSubAgentClient) EXPECT() *MockSubAgentClientMockRecorder {
return m.recorder
}
// Create mocks base method.
func (m *MockSubAgentClient) Create(ctx context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", ctx, agent)
ret0, _ := ret[0].(agentcontainers.SubAgent)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Create indicates an expected call of Create.
func (mr *MockSubAgentClientMockRecorder) Create(ctx, agent any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockSubAgentClient)(nil).Create), ctx, agent)
}
// Delete mocks base method.
func (m *MockSubAgentClient) Delete(ctx context.Context, id uuid.UUID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, id)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockSubAgentClientMockRecorder) Delete(ctx, id any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockSubAgentClient)(nil).Delete), ctx, id)
}
// List mocks base method.
func (m *MockSubAgentClient) List(ctx context.Context) ([]agentcontainers.SubAgent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "List", ctx)
ret0, _ := ret[0].([]agentcontainers.SubAgent)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// List indicates an expected call of List.
func (mr *MockSubAgentClientMockRecorder) List(ctx any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSubAgentClient)(nil).List), ctx)
}
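The generated mocks follow the standard gomock pattern: construct the mock with a controller, register expectations, then exercise it. A minimal usage sketch, assuming the import path `agent/agentcontainers/acmock` implied by the generated file's package layout (the test function itself is hypothetical; the `List` signature matches the expectations used elsewhere in this diff):

```go
package acmock_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/coder/coder/v2/agent/agentcontainers/acmock"
	"github.com/coder/coder/v2/codersdk"
)

// TestMockContainerCLI is a hypothetical example of wiring the generated
// ContainerCLI mock: stub an expectation, then call through it.
func TestMockContainerCLI(t *testing.T) {
	ctrl := gomock.NewController(t)
	mCCLI := acmock.NewMockContainerCLI(ctrl)

	// Stub List to return an empty container list any number of times.
	mCCLI.EXPECT().List(gomock.Any()).Return(
		codersdk.WorkspaceAgentListContainersResponse{}, nil,
	).AnyTimes()

	resp, err := mCCLI.List(context.Background())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(resp.Containers) != 0 {
		t.Fatalf("expected no containers, got %d", len(resp.Containers))
	}
}
```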
+1 -1
@@ -1,4 +1,4 @@
// Package acmock contains a mock implementation of agentcontainers.Lister for use in tests.
package acmock
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
+15 -47
@@ -562,9 +562,12 @@ func (api *API) discoverDevcontainersInProject(projectPath string) error {
api.broadcastUpdatesLocked()
if dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting {
api.asyncWg.Go(func() {
api.asyncWg.Add(1)
go func() {
defer api.asyncWg.Done()
_ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath)
})
}()
}
}
api.mu.Unlock()
@@ -1624,25 +1627,16 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
api.mu.Lock()
defer api.mu.Unlock()
// Collect all subagent IDs that should be kept:
// 1. Subagents currently tracked by injectedSubAgentProcs
// 2. Subagents referenced by known devcontainers from the manifest
var keep []uuid.UUID
injected := make(map[uuid.UUID]bool, len(api.injectedSubAgentProcs))
for _, proc := range api.injectedSubAgentProcs {
keep = append(keep, proc.agent.ID)
}
for _, dc := range api.knownDevcontainers {
if dc.SubagentID.Valid {
keep = append(keep, dc.SubagentID.UUID)
}
injected[proc.agent.ID] = true
}
ctx, cancel := context.WithTimeout(ctx, defaultOperationTimeout)
defer cancel()
var errs []error
for _, agent := range agents {
if slices.Contains(keep, agent.ID) {
if injected[agent.ID] {
continue
}
client := *api.subAgentClient.Load()
@@ -1653,11 +1647,10 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
slog.F("agent_id", agent.ID),
slog.F("agent_name", agent.Name),
)
errs = append(errs, xerrors.Errorf("delete agent %s (%s): %w", agent.Name, agent.ID, err))
}
}
return errors.Join(errs...)
return nil
}
// maybeInjectSubAgentIntoContainerLocked injects a subagent into a dev
@@ -2008,20 +2001,7 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
// logger.Warn(ctx, "set CAP_NET_ADMIN on agent binary failed", slog.Error(err))
// }
// Only delete and recreate subagents that were dynamically created
// (ID == uuid.Nil). Terraform-defined subagents (subAgentConfig.ID !=
// uuid.Nil) must not be deleted because they have attached resources
// managed by terraform.
isTerraformManaged := subAgentConfig.ID != uuid.Nil
configHasChanged := !proc.agent.EqualConfig(subAgentConfig)
logger.Debug(ctx, "checking if sub agent should be deleted",
slog.F("is_terraform_managed", isTerraformManaged),
slog.F("maybe_recreate_sub_agent", maybeRecreateSubAgent),
slog.F("config_has_changed", configHasChanged),
)
deleteSubAgent := !isTerraformManaged && maybeRecreateSubAgent && configHasChanged
deleteSubAgent := proc.agent.ID != uuid.Nil && maybeRecreateSubAgent && !proc.agent.EqualConfig(subAgentConfig)
if deleteSubAgent {
logger.Debug(ctx, "deleting existing subagent for recreation", slog.F("agent_id", proc.agent.ID))
client := *api.subAgentClient.Load()
@@ -2032,23 +2012,11 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
proc.agent = SubAgent{} // Clear agent to signal that we need to create a new one.
}
// Re-create (upsert) terraform-managed subagents when the config
// changes so that display apps and other settings are updated
// without deleting the agent.
recreateTerraformSubAgent := isTerraformManaged && maybeRecreateSubAgent && configHasChanged
if proc.agent.ID == uuid.Nil || recreateTerraformSubAgent {
if recreateTerraformSubAgent {
logger.Debug(ctx, "updating existing subagent",
slog.F("directory", subAgentConfig.Directory),
slog.F("display_apps", subAgentConfig.DisplayApps),
)
} else {
logger.Debug(ctx, "creating new subagent",
slog.F("directory", subAgentConfig.Directory),
slog.F("display_apps", subAgentConfig.DisplayApps),
)
}
if proc.agent.ID == uuid.Nil {
logger.Debug(ctx, "creating new subagent",
slog.F("directory", subAgentConfig.Directory),
slog.F("display_apps", subAgentConfig.DisplayApps),
)
// Create new subagent record in the database to receive the auth token.
// If we get a unique constraint violation, try with expanded names that
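The cleanup hunk above replaces a `keep` slice checked with `slices.Contains` by a map keyed on agent ID. A self-contained sketch of that pattern, with illustrative names and data:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// IDs of subagents currently tracked by injected processes
	// (illustrative stand-in for injectedSubAgentProcs).
	tracked := []uuid.UUID{uuid.New(), uuid.New()}

	// Build the set once: O(n) construction, then O(1) membership tests,
	// versus slices.Contains, which rescans the slice on every lookup.
	injected := make(map[uuid.UUID]bool, len(tracked))
	for _, id := range tracked {
		injected[id] = true
	}

	// Any agent not in the set is an orphan and eligible for deletion.
	orphan := uuid.New()
	if !injected[orphan] {
		fmt.Printf("agent %s is not injected; delete it\n", orphan)
	}
}
```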
+9 -369
@@ -437,11 +437,7 @@ func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.S
}
}
// Only generate a new ID if one wasn't provided. Terraform-defined
// subagents have pre-existing IDs that should be preserved.
if agent.ID == uuid.Nil {
agent.ID = uuid.New()
}
agent.ID = uuid.New()
agent.AuthToken = uuid.New()
if m.agents == nil {
m.agents = make(map[uuid.UUID]agentcontainers.SubAgent)
@@ -1039,30 +1035,6 @@ func TestAPI(t *testing.T) {
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
},
{
name: "Terraform-defined devcontainer can be rebuilt",
devcontainerID: devcontainerID1.String(),
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{
{
ID: devcontainerID1,
Name: "test-devcontainer-terraform",
WorkspaceFolder: workspaceFolder1,
ConfigPath: configPath1,
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
Container: &devContainer1,
SubagentID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
},
},
lister: &fakeContainerCLI{
containers: codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{devContainer1},
},
arch: "<none>",
},
devcontainerCLI: &fakeDevcontainerCLI{},
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
},
}
for _, tt := range tests {
@@ -1477,6 +1449,14 @@ func TestAPI(t *testing.T) {
)
}
api := agentcontainers.NewAPI(logger, apiOpts...)
api.Start()
defer api.Close()
r := chi.NewRouter()
r.Mount("/", api.Routes())
var (
agentRunningCh chan struct{}
stopAgentCh chan struct{}
@@ -1493,14 +1473,6 @@ func TestAPI(t *testing.T) {
}
}
api := agentcontainers.NewAPI(logger, apiOpts...)
api.Start()
defer api.Close()
r := chi.NewRouter()
r.Mount("/", api.Routes())
tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()
@@ -2518,338 +2490,6 @@ func TestAPI(t *testing.T) {
assert.Empty(t, fakeSAC.agents)
})
t.Run("SubAgentCleanupPreservesTerraformDefined", func(t *testing.T) {
t.Parallel()
var (
// Given: A terraform-defined agent and devcontainer that should be preserved
terraformAgentID = uuid.New()
terraformAgentToken = uuid.New()
terraformAgent = agentcontainers.SubAgent{
ID: terraformAgentID,
Name: "terraform-defined-agent",
Directory: "/workspace",
AuthToken: terraformAgentToken,
}
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
ID: uuid.New(),
Name: "terraform-devcontainer",
WorkspaceFolder: "/workspace/project",
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
}
// Given: An orphaned agent that should be cleaned up
orphanedAgentID = uuid.New()
orphanedAgentToken = uuid.New()
orphanedAgent = agentcontainers.SubAgent{
ID: orphanedAgentID,
Name: "orphaned-agent",
Directory: "/tmp",
AuthToken: orphanedAgentToken,
}
ctx = testutil.Context(t, testutil.WaitMedium)
logger = slog.Make()
mClock = quartz.NewMock(t)
mCCLI = acmock.NewMockContainerCLI(gomock.NewController(t))
fakeSAC = &fakeSubAgentClient{
logger: logger.Named("fakeSubAgentClient"),
agents: map[uuid.UUID]agentcontainers.SubAgent{
terraformAgentID: terraformAgent,
orphanedAgentID: orphanedAgent,
},
}
)
mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{},
}, nil).AnyTimes()
mClock.Set(time.Now()).MustWait(ctx)
tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
api := agentcontainers.NewAPI(logger,
agentcontainers.WithClock(mClock),
agentcontainers.WithContainerCLI(mCCLI),
agentcontainers.WithSubAgentClient(fakeSAC),
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
agentcontainers.WithDevcontainers([]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer}, nil),
)
api.Start()
defer api.Close()
tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()
// When: We advance the clock, allowing cleanup to occur
_, aw := mClock.AdvanceNext()
aw.MustWait(ctx)
// Then: The orphaned agent should be deleted
assert.Contains(t, fakeSAC.deleted, orphanedAgentID, "orphaned agent should be deleted")
// And: The terraform-defined agent should not be deleted
assert.NotContains(t, fakeSAC.deleted, terraformAgentID, "terraform-defined agent should be preserved")
assert.Len(t, fakeSAC.agents, 1, "only terraform agent should remain")
assert.Contains(t, fakeSAC.agents, terraformAgentID, "terraform agent should still exist")
})
t.Run("TerraformDefinedSubAgentNotRecreatedOnConfigChange", func(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
}
var (
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
mCtrl = gomock.NewController(t)
// Given: A terraform-defined devcontainer with a pre-assigned subagent ID.
terraformAgentID = uuid.New()
terraformContainer = codersdk.WorkspaceAgentContainer{
ID: "test-container-id",
FriendlyName: "test-container",
Image: "test-image",
Running: true,
CreatedAt: time.Now(),
Labels: map[string]string{
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
},
}
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
ID: uuid.New(),
Name: "terraform-devcontainer",
WorkspaceFolder: "/workspace/project",
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
}
fCCLI = &fakeContainerCLI{
containers: codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
},
arch: runtime.GOARCH,
}
fDCCLI = &fakeDevcontainerCLI{
upID: terraformContainer.ID,
readConfig: agentcontainers.DevcontainerConfig{
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
Customizations: agentcontainers.DevcontainerMergedCustomizations{
Coder: []agentcontainers.CoderCustomization{{
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
}},
},
},
},
}
mSAC = acmock.NewMockSubAgentClient(mCtrl)
closed bool
)
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
// EXPECT: Create is called twice with the terraform-defined ID:
// once for the initial creation and once after the rebuild with
// config changes (upsert).
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
assert.Equal(t, terraformAgentID, agent.ID, "agent should have terraform-defined ID")
agent.AuthToken = uuid.New()
return agent, nil
},
).Times(2)
// EXPECT: Delete may be called during Close, but not before.
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
assert.True(t, closed, "Delete should only be called after Close, not during recreation")
return nil
}).AnyTimes()
api := agentcontainers.NewAPI(logger,
agentcontainers.WithContainerCLI(fCCLI),
agentcontainers.WithDevcontainerCLI(fDCCLI),
agentcontainers.WithDevcontainers(
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
),
agentcontainers.WithSubAgentClient(mSAC),
agentcontainers.WithSubAgentURL("test-subagent-url"),
agentcontainers.WithWatcher(watcher.NewNoop()),
)
api.Start()
// Given: We create the devcontainer for the first time.
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
require.NoError(t, err)
// When: The container is recreated (new container ID) with config changes.
terraformContainer.ID = "new-container-id"
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
fDCCLI.upID = terraformContainer.ID
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
Apps: []agentcontainers.SubAgentApp{{Slug: "app2"}}, // Changed app triggers recreation logic.
}}
err = api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath, agentcontainers.WithRemoveExistingContainer())
require.NoError(t, err)
// Then: Mock expectations verify that Create was called once and Delete was not called during recreation.
closed = true
api.Close()
})
// Verify that rebuilding a terraform-defined devcontainer via the
// HTTP API does not delete the sub agent. The sub agent should be
// preserved (Create called again with the same terraform ID) and
// display app changes should be picked up.
t.Run("TerraformDefinedSubAgentRebuildViaHTTP", func(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
}
var (
ctx = testutil.Context(t, testutil.WaitMedium)
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
mCtrl = gomock.NewController(t)
terraformAgentID = uuid.New()
containerID = "test-container-id"
terraformContainer = codersdk.WorkspaceAgentContainer{
ID: containerID,
FriendlyName: "test-container",
Image: "test-image",
Running: true,
CreatedAt: time.Now(),
Labels: map[string]string{
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
},
}
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
ID: uuid.New(),
Name: "terraform-devcontainer",
WorkspaceFolder: "/workspace/project",
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
}
fCCLI = &fakeContainerCLI{
containers: codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
},
arch: runtime.GOARCH,
}
fDCCLI = &fakeDevcontainerCLI{
upID: containerID,
readConfig: agentcontainers.DevcontainerConfig{
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
Customizations: agentcontainers.DevcontainerMergedCustomizations{
Coder: []agentcontainers.CoderCustomization{{
DisplayApps: map[codersdk.DisplayApp]bool{
codersdk.DisplayAppSSH: true,
codersdk.DisplayAppWebTerminal: true,
},
}},
},
},
},
}
mSAC = acmock.NewMockSubAgentClient(mCtrl)
closed bool
createCalled = make(chan agentcontainers.SubAgent, 2)
)
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
// Create should be called twice: once for the initial injection
// and once after the rebuild picks up the new container.
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
assert.Equal(t, terraformAgentID, agent.ID, "agent should always use terraform-defined ID")
agent.AuthToken = uuid.New()
createCalled <- agent
return agent, nil
},
).Times(2)
// Delete must only be called during Close, never during rebuild.
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
assert.True(t, closed, "Delete should only be called after Close, not during rebuild")
return nil
}).AnyTimes()
api := agentcontainers.NewAPI(logger,
agentcontainers.WithContainerCLI(fCCLI),
agentcontainers.WithDevcontainerCLI(fDCCLI),
agentcontainers.WithDevcontainers(
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
),
agentcontainers.WithSubAgentClient(mSAC),
agentcontainers.WithSubAgentURL("test-subagent-url"),
agentcontainers.WithWatcher(watcher.NewNoop()),
)
api.Start()
defer func() {
closed = true
api.Close()
}()
r := chi.NewRouter()
r.Mount("/", api.Routes())
// Perform the initial devcontainer creation directly to set up
// the subagent (mirrors the TerraformDefinedSubAgentNotRecreatedOnConfigChange
// test pattern).
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
require.NoError(t, err)
initialAgent := testutil.RequireReceive(ctx, t, createCalled)
assert.Equal(t, terraformAgentID, initialAgent.ID)
// Simulate container rebuild: new container ID, changed display apps.
newContainerID := "new-container-id"
terraformContainer.ID = newContainerID
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
fDCCLI.upID = newContainerID
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
DisplayApps: map[codersdk.DisplayApp]bool{
codersdk.DisplayAppSSH: true,
codersdk.DisplayAppWebTerminal: true,
codersdk.DisplayAppVSCodeDesktop: true,
codersdk.DisplayAppVSCodeInsiders: true,
},
}}
// Issue the rebuild request via the HTTP API.
req := httptest.NewRequest(http.MethodPost, "/devcontainers/"+terraformDevcontainer.ID.String()+"/recreate", nil).
WithContext(ctx)
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
require.Equal(t, http.StatusAccepted, rec.Code)
// Wait for the post-rebuild injection to complete.
rebuiltAgent := testutil.RequireReceive(ctx, t, createCalled)
assert.Equal(t, terraformAgentID, rebuiltAgent.ID, "rebuilt agent should preserve terraform ID")
// Verify that the display apps were updated.
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeDesktop,
"rebuilt agent should include updated display apps")
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeInsiders,
"rebuilt agent should include updated display apps")
})
t.Run("Error", func(t *testing.T) {
t.Parallel()
+2 -10
@@ -24,12 +24,10 @@ type SubAgent struct {
DisplayApps []codersdk.DisplayApp
}
// CloneConfig makes a copy of SubAgent using configuration from the
// devcontainer. The ID is inherited from dc.SubagentID if present, and
// the name is inherited from the devcontainer. AuthToken is not copied.
// CloneConfig makes a copy of SubAgent without ID and AuthToken. The
// name is inherited from the devcontainer.
func (s SubAgent) CloneConfig(dc codersdk.WorkspaceAgentDevcontainer) SubAgent {
return SubAgent{
ID: dc.SubagentID.UUID,
Name: dc.Name,
Directory: s.Directory,
Architecture: s.Architecture,
@@ -192,11 +190,6 @@ func (a *subAgentAPIClient) List(ctx context.Context) ([]SubAgent, error) {
func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAgent, err error) {
a.logger.Debug(ctx, "creating sub agent", slog.F("name", agent.Name), slog.F("directory", agent.Directory))
var id []byte
if agent.ID != uuid.Nil {
id = agent.ID[:]
}
displayApps := make([]agentproto.CreateSubAgentRequest_DisplayApp, 0, len(agent.DisplayApps))
for _, displayApp := range agent.DisplayApps {
var app agentproto.CreateSubAgentRequest_DisplayApp
@@ -235,7 +228,6 @@ func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAg
OperatingSystem: agent.OperatingSystem,
DisplayApps: displayApps,
Apps: apps,
Id: id,
})
if err != nil {
return SubAgent{}, err
-125
@@ -306,128 +306,3 @@ func TestSubAgentClient_CreateWithDisplayApps(t *testing.T) {
}
})
}
func TestSubAgent_CloneConfig(t *testing.T) {
t.Parallel()
t.Run("CopiesIDFromDevcontainer", func(t *testing.T) {
t.Parallel()
subAgent := agentcontainers.SubAgent{
ID: uuid.New(),
Name: "original-name",
Directory: "/workspace",
Architecture: "amd64",
OperatingSystem: "linux",
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
}
expectedID := uuid.MustParse("550e8400-e29b-41d4-a716-446655440000")
dc := codersdk.WorkspaceAgentDevcontainer{
Name: "devcontainer-name",
SubagentID: uuid.NullUUID{UUID: expectedID, Valid: true},
}
cloned := subAgent.CloneConfig(dc)
assert.Equal(t, expectedID, cloned.ID)
assert.Equal(t, dc.Name, cloned.Name)
assert.Equal(t, subAgent.Directory, cloned.Directory)
assert.Zero(t, cloned.AuthToken, "AuthToken should not be copied")
})
t.Run("HandlesNilSubagentID", func(t *testing.T) {
t.Parallel()
subAgent := agentcontainers.SubAgent{
ID: uuid.New(),
Name: "original-name",
Directory: "/workspace",
Architecture: "amd64",
OperatingSystem: "linux",
}
dc := codersdk.WorkspaceAgentDevcontainer{
Name: "devcontainer-name",
SubagentID: uuid.NullUUID{Valid: false},
}
cloned := subAgent.CloneConfig(dc)
assert.Equal(t, uuid.Nil, cloned.ID)
})
}
func TestSubAgent_EqualConfig(t *testing.T) {
t.Parallel()
base := agentcontainers.SubAgent{
ID: uuid.New(),
Name: "test-agent",
Directory: "/workspace",
Architecture: "amd64",
OperatingSystem: "linux",
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
Apps: []agentcontainers.SubAgentApp{
{Slug: "test-app", DisplayName: "Test App"},
},
}
tests := []struct {
name string
modify func(*agentcontainers.SubAgent)
wantEqual bool
}{
{
name: "identical",
modify: func(s *agentcontainers.SubAgent) {},
wantEqual: true,
},
{
name: "different ID",
modify: func(s *agentcontainers.SubAgent) { s.ID = uuid.New() },
wantEqual: true,
},
{
name: "different Name",
modify: func(s *agentcontainers.SubAgent) { s.Name = "different-name" },
wantEqual: false,
},
{
name: "different Directory",
modify: func(s *agentcontainers.SubAgent) { s.Directory = "/different/path" },
wantEqual: false,
},
{
name: "different Architecture",
modify: func(s *agentcontainers.SubAgent) { s.Architecture = "arm64" },
wantEqual: false,
},
{
name: "different OperatingSystem",
modify: func(s *agentcontainers.SubAgent) { s.OperatingSystem = "windows" },
wantEqual: false,
},
{
name: "different DisplayApps",
modify: func(s *agentcontainers.SubAgent) { s.DisplayApps = []codersdk.DisplayApp{codersdk.DisplayAppSSH} },
wantEqual: false,
},
{
name: "different Apps",
modify: func(s *agentcontainers.SubAgent) {
s.Apps = []agentcontainers.SubAgentApp{{Slug: "different-app", DisplayName: "Different App"}}
},
wantEqual: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
modified := base
tt.modify(&modified)
assert.Equal(t, tt.wantEqual, base.EqualConfig(modified))
})
}
}
+1 -4
@@ -99,10 +99,7 @@ func (c *Client) SyncReady(ctx context.Context, unitName unit.ID) (bool, error)
resp, err := c.client.SyncReady(ctx, &proto.SyncReadyRequest{
Unit: string(unitName),
})
if err != nil {
return false, xerrors.Errorf("sync ready: %w", err)
}
return resp.Ready, nil
return resp.Ready, err
}
// SyncStatus gets the status of a unit and its dependencies.
+103 -2
View File
@@ -1,22 +1,37 @@
package agentsocket_test
import (
"context"
"path/filepath"
"runtime"
"testing"
"github.com/google/uuid"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
"cdr.dev/slog/v3"
"github.com/coder/coder/v2/agent"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/agenttest"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/tailnettest"
"github.com/coder/coder/v2/testutil"
)
func TestServer(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("agentsocket is not supported on Windows")
}
t.Run("StartStop", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
@@ -26,7 +41,7 @@ func TestServer(t *testing.T) {
t.Run("AlreadyStarted", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server1, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
@@ -34,4 +49,90 @@ func TestServer(t *testing.T) {
_, err = agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.ErrorContains(t, err, "create socket")
})
t.Run("AutoSocketPath", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.NoError(t, err)
require.NoError(t, server.Close())
})
}
func TestServerWindowsNotSupported(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" {
t.Skip("this test only runs on Windows")
}
t.Run("NewServer", func(t *testing.T) {
t.Parallel()
socketPath := filepath.Join(t.TempDir(), "test.sock")
logger := slog.Make().Leveled(slog.LevelDebug)
_, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
})
t.Run("NewClient", func(t *testing.T) {
t.Parallel()
_, err := agentsocket.NewClient(context.Background(), agentsocket.WithPath("test.sock"))
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
})
}
func TestAgentInitializesOnWindowsWithoutSocketServer(t *testing.T) {
t.Parallel()
if runtime.GOOS != "windows" {
t.Skip("this test only runs on Windows")
}
ctx := testutil.Context(t, testutil.WaitShort)
logger := testutil.Logger(t).Named("agent")
derpMap, _ := tailnettest.RunDERPAndSTUN(t)
coordinator := tailnet.NewCoordinator(logger)
t.Cleanup(func() {
_ = coordinator.Close()
})
statsCh := make(chan *agentproto.Stats, 50)
agentID := uuid.New()
manifest := agentsdk.Manifest{
AgentID: agentID,
AgentName: "test-agent",
WorkspaceName: "test-workspace",
OwnerName: "test-user",
WorkspaceID: uuid.New(),
DERPMap: derpMap,
}
client := agenttest.NewClient(t, logger.Named("agenttest"), agentID, manifest, statsCh, coordinator)
t.Cleanup(client.Close)
options := agent.Options{
Client: client,
Filesystem: afero.NewMemMapFs(),
Logger: logger.Named("agent"),
ReconnectingPTYTimeout: testutil.WaitShort,
EnvironmentVariables: map[string]string{},
SocketPath: "",
}
agnt := agent.New(options)
t.Cleanup(func() {
_ = agnt.Close()
})
startup := testutil.TryReceive(ctx, t, client.GetStartup())
require.NotNil(t, startup, "agent should send startup message")
err := agnt.Close()
require.NoError(t, err, "agent should close cleanly")
}
+17 -11
View File
@@ -2,6 +2,8 @@ package agentsocket_test
import (
"context"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/require"
@@ -28,10 +30,14 @@ func newSocketClient(ctx context.Context, t *testing.T, socketPath string) *agen
func TestDRPCAgentSocketService(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("agentsocket is not supported on Windows")
}
t.Run("Ping", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -51,7 +57,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("NewUnit", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -73,7 +79,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnitAlreadyStarted", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -103,7 +109,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnitAlreadyCompleted", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -142,7 +148,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnitNotReady", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -172,7 +178,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("NewUnits", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -197,7 +203,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("DependencyAlreadyRegistered", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -232,7 +238,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("DependencyAddedAfterDependentStarted", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -274,7 +280,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnregisteredUnit", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -293,7 +299,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnitNotReady", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
@@ -317,7 +323,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
t.Run("UnitReady", func(t *testing.T) {
t.Parallel()
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
ctx := testutil.Context(t, testutil.WaitShort)
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
+6 -47
View File
@@ -4,60 +4,19 @@ package agentsocket
import (
"context"
"fmt"
"net"
"os"
"os/user"
"strings"
"github.com/Microsoft/go-winio"
"golang.org/x/xerrors"
)
const defaultSocketPath = `\\.\pipe\com.coder.agentsocket`
func createSocket(path string) (net.Listener, error) {
if path == "" {
path = defaultSocketPath
}
if !strings.HasPrefix(path, `\\.\pipe\`) {
return nil, xerrors.Errorf("%q is not a valid local socket path", path)
}
user, err := user.Current()
if err != nil {
return nil, fmt.Errorf("unable to look up current user: %w", err)
}
sid := user.Uid
// SecurityDescriptor is in SDDL format. c.f.
// https://learn.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format for full details.
// D: indicates this is a Discretionary Access Control List (DACL), which is Windows-speak for ACLs that allow or
// deny access (as opposed to SACL which controls audit logging).
// P indicates that this DACL is "protected" from being modified through inheritance
// () delimit access control entries (ACEs), here we only have one, which, allows (A) generic all (GA) access to our
// specific user's security ID (SID).
//
// Note that although the Microsoft docs at https://learn.microsoft.com/en-us/windows/win32/ipc/named-pipes warn that
// named pipes are accessible from remote machines in the general case, the `winio` package sets the flag
// windows.FILE_PIPE_REJECT_REMOTE_CLIENTS when creating pipes, so connections from remote machines are always
// denied. This is important because we generally expect customers to run the Coder agent under a generic user
// account unless they are very sophisticated. We don't want this socket to cross the boundary of the local machine.
configuration := &winio.PipeConfig{
SecurityDescriptor: fmt.Sprintf("D:P(A;;GA;;;%s)", sid),
}
listener, err := winio.ListenPipe(path, configuration)
if err != nil {
return nil, xerrors.Errorf("failed to open named pipe: %w", err)
}
return listener, nil
func createSocket(_ string) (net.Listener, error) {
return nil, xerrors.New("agentsocket is not supported on Windows")
}
func cleanupSocket(path string) error {
return os.Remove(path)
func cleanupSocket(_ string) error {
return nil
}
func dialSocket(ctx context.Context, path string) (net.Conn, error) {
return winio.DialPipeContext(ctx, path)
func dialSocket(_ context.Context, _ string) (net.Conn, error) {
return nil, xerrors.New("agentsocket is not supported on Windows")
}
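For reference, a hedged sketch of the named-pipe ACL pattern the removed implementation relied on; it assumes github.com/Microsoft/go-winio and that user.Current().Uid yields the caller's SID on Windows, as the code above did:

// Sketch: listen on a named pipe that only the current user may open.
u, err := user.Current()
if err != nil {
	return nil, xerrors.Errorf("look up current user: %w", err)
}
cfg := &winio.PipeConfig{
	// D: = DACL, P = protected from inheritance,
	// (A;;GA;;;<SID>) = allow generic-all access to this SID only.
	SecurityDescriptor: fmt.Sprintf("D:P(A;;GA;;;%s)", u.Uid),
}
listener, err := winio.ListenPipe(`\\.\pipe\example`, cfg)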
-6
View File
@@ -124,12 +124,6 @@ func (c *Client) Close() {
c.derpMapOnce.Do(func() { close(c.derpMapUpdates) })
}
func (c *Client) ConnectRPC28WithRole(ctx context.Context, _ string) (
agentproto.DRPCAgentClient28, proto.DRPCTailnetClient28, error,
) {
return c.ConnectRPC28(ctx)
}
func (c *Client) ConnectRPC28(ctx context.Context) (
agentproto.DRPCAgentClient28, proto.DRPCTailnetClient28, error,
) {
-9
View File
@@ -4,8 +4,6 @@ import (
"os"
"github.com/hashicorp/go-reap"
"cdr.dev/slog/v3"
)
type Option func(o *options)
@@ -36,15 +34,8 @@ func WithCatchSignals(sigs ...os.Signal) Option {
}
}
func WithLogger(logger slog.Logger) Option {
return func(o *options) {
o.Logger = logger
}
}
type options struct {
ExecArgs []string
PIDs reap.PidCh
CatchSignals []os.Signal
Logger slog.Logger
}
+2 -14
View File
@@ -3,15 +3,12 @@
package reaper
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/hashicorp/go-reap"
"golang.org/x/xerrors"
"cdr.dev/slog/v3"
)
// IsInitProcess returns true if the current process's PID is 1.
@@ -19,7 +16,7 @@ func IsInitProcess() bool {
return os.Getpid() == 1
}
func catchSignals(logger slog.Logger, pid int, sigs []os.Signal) {
func catchSignals(pid int, sigs []os.Signal) {
if len(sigs) == 0 {
return
}
@@ -28,19 +25,10 @@ func catchSignals(logger slog.Logger, pid int, sigs []os.Signal) {
signal.Notify(sc, sigs...)
defer signal.Stop(sc)
logger.Info(context.Background(), "reaper catching signals",
slog.F("signals", sigs),
slog.F("child_pid", pid),
)
for {
s := <-sc
sig, ok := s.(syscall.Signal)
if ok {
logger.Info(context.Background(), "reaper caught signal, killing child process",
slog.F("signal", sig.String()),
slog.F("child_pid", pid),
)
_ = syscall.Kill(pid, sig)
}
}
@@ -90,7 +78,7 @@ func ForkReap(opt ...Option) (int, error) {
return 1, xerrors.Errorf("fork exec: %w", err)
}
go catchSignals(opts.Logger, pid, opts.CatchSignals)
go catchSignals(pid, opts.CatchSignals)
var wstatus syscall.WaitStatus
_, err = syscall.Wait4(pid, &wstatus, 0, nil)
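With the logger option gone, a minimal sketch of how a caller now invokes the reaper; this mirrors the agent CLI change later in this diff, and the names come from the snippets above:

// Sketch: fork a reaped child, forward stop signals, propagate its exit code.
exitCode, err := reaper.ForkReap(
	reaper.WithExecArgs(args...),
	reaper.WithCatchSignals(StopSignals...),
)
if err != nil {
	return xerrors.Errorf("fork reap: %w", err)
}
return ExitError(exitCode, nil)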
+21 -25
View File
@@ -3,11 +3,11 @@
"enabled": true,
"clientKind": "git",
"useIgnoreFile": true,
"defaultBranch": "main",
"defaultBranch": "main"
},
"files": {
"includes": ["**", "!**/pnpm-lock.yaml"],
"ignoreUnknown": true,
"ignoreUnknown": true
},
"linter": {
"rules": {
@@ -15,18 +15,18 @@
"noSvgWithoutTitle": "off",
"useButtonType": "off",
"useSemanticElements": "off",
"noStaticElementInteractions": "off",
"noStaticElementInteractions": "off"
},
"correctness": {
"noUnusedImports": "warn",
"correctness": {
"noUnusedImports": "warn",
"useUniqueElementIds": "off", // TODO: This is new but we want to fix it
"noNestedComponentDefinitions": "off", // TODO: Investigate, since it is used by shadcn components
"noUnusedVariables": {
"level": "warn",
"noUnusedVariables": {
"level": "warn",
"options": {
"ignoreRestSiblings": true,
},
},
"ignoreRestSiblings": true
}
}
},
"style": {
"noNonNullAssertion": "off",
@@ -45,10 +45,6 @@
"level": "error",
"options": {
"paths": {
"react": {
"message": "React 19 no longer requires forwardRef. Use ref as a prop instead.",
"importNames": ["forwardRef"],
},
// "@mui/material/Alert": "Use components/Alert/Alert instead.",
// "@mui/material/AlertTitle": "Use components/Alert/Alert instead.",
// "@mui/material/Autocomplete": "Use shadcn/ui Combobox instead.",
@@ -115,10 +111,10 @@
"@emotion/styled": "Use Tailwind CSS instead.",
// "@emotion/cache": "Use Tailwind CSS instead.",
// "components/Stack/Stack": "Use Tailwind flex utilities instead (e.g., <div className='flex flex-col gap-4'>).",
"lodash": "Use lodash/<name> instead.",
},
},
},
"lodash": "Use lodash/<name> instead."
}
}
}
},
"suspicious": {
"noArrayIndexKey": "off",
@@ -129,14 +125,14 @@
"noConsole": {
"level": "error",
"options": {
"allow": ["error", "info", "warn"],
},
},
"allow": ["error", "info", "warn"]
}
}
},
"complexity": {
"noImportantStyles": "off", // TODO: check and fix !important styles
},
},
"noImportantStyles": "off" // TODO: check and fix !important styles
}
}
},
"$schema": "./node_modules/@biomejs/biome/configuration_schema.json",
"$schema": "./node_modules/@biomejs/biome/configuration_schema.json"
}
+16 -44
View File
@@ -9,7 +9,6 @@ import (
"net/http/pprof"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"slices"
@@ -131,7 +130,6 @@ func workspaceAgent() *serpent.Command {
sinks = append(sinks, sloghuman.Sink(logWriter))
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
logger = logger.Named("reaper")
logger.Info(ctx, "spawning reaper process")
// Do not start a reaper on the child process. It's important
@@ -141,19 +139,31 @@ func workspaceAgent() *serpent.Command {
exitCode, err := reaper.ForkReap(
reaper.WithExecArgs(args...),
reaper.WithCatchSignals(StopSignals...),
reaper.WithLogger(logger),
)
if err != nil {
logger.Error(ctx, "agent process reaper unable to fork", slog.Error(err))
return xerrors.Errorf("fork reap: %w", err)
}
logger.Info(ctx, "child process exited, propagating exit code",
slog.F("exit_code", exitCode),
)
logger.Info(ctx, "reaper child process exited", slog.F("exit_code", exitCode))
return ExitError(exitCode, nil)
}
// Handle interrupt signals to allow for graceful shutdown,
// note that calling stopNotify disables the signal handler
// and the next interrupt will terminate the program (you
// probably want cancel instead).
//
// Note that we don't want to handle these signals in the
// process that runs as PID 1, which is why we do this after
// the reaper has forked.
ctx, stopNotify := inv.SignalNotifyContext(ctx, StopSignals...)
defer stopNotify()
// DumpHandler does signal handling, so we call it after the
// reaper.
go DumpHandler(ctx, "agent")
logWriter := &clilog.LumberjackWriteCloseFixer{Writer: &lumberjack.Logger{
Filename: filepath.Join(logDir, "coder-agent.log"),
MaxSize: 5, // MB
@@ -166,21 +176,6 @@ func workspaceAgent() *serpent.Command {
sinks = append(sinks, sloghuman.Sink(logWriter))
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
// Handle interrupt signals to allow for graceful shutdown,
// note that calling stopNotify disables the signal handler
// and the next interrupt will terminate the program (you
// probably want cancel instead).
//
// Note that we also handle these signals in the
// process that runs as PID 1, mainly to forward them to the agent child
// so that it can shut down gracefully.
ctx, stopNotify := logSignalNotifyContext(ctx, logger, StopSignals...)
defer stopNotify()
// DumpHandler does signal handling, so we call it after the
// reaper.
go DumpHandler(ctx, "agent")
version := buildinfo.Version()
logger.Info(ctx, "agent is starting now",
slog.F("url", agentAuth.agentURL),
@@ -570,26 +565,3 @@ func urlPort(u string) (int, error) {
}
return -1, xerrors.Errorf("invalid port: %s", u)
}
// logSignalNotifyContext is like signal.NotifyContext but logs the received
// signal before canceling the context.
func logSignalNotifyContext(parent context.Context, logger slog.Logger, signals ...os.Signal) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancelCause(parent)
c := make(chan os.Signal, 1)
signal.Notify(c, signals...)
go func() {
select {
case sig := <-c:
logger.Info(ctx, "agent received signal", slog.F("signal", sig.String()))
cancel(xerrors.Errorf("signal: %s", sig.String()))
case <-ctx.Done():
logger.Info(ctx, "ctx canceled, stopping signal handler")
}
}()
return ctx, func() {
cancel(context.Canceled)
signal.Stop(c)
}
}
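With the logging wrapper above removed, the agent falls back to the plain notify-context pattern; a minimal stdlib sketch, assuming inv.SignalNotifyContext behaves like signal.NotifyContext:

// Sketch: the first stop signal cancels ctx; once stop() is called,
// the next signal terminates the process with default behavior.
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()
<-ctx.Done()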
-2
View File
@@ -23,9 +23,7 @@ func (r *RootCmd) organizations() *serpent.Command {
},
Children: []*serpent.Command{
r.showOrganization(orgContext),
r.listOrganizations(),
r.createOrganization(),
r.deleteOrganization(orgContext),
r.organizationMembers(orgContext),
r.organizationRoles(orgContext),
r.organizationSettings(orgContext),
-165
View File
@@ -1,13 +1,10 @@
package cli_test
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"sync/atomic"
"testing"
"time"
@@ -15,10 +12,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/pty/ptytest"
"github.com/coder/pretty"
)
func TestCurrentOrganization(t *testing.T) {
@@ -59,166 +54,6 @@ func TestCurrentOrganization(t *testing.T) {
})
}
func TestOrganizationList(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
t.Parallel()
orgID := uuid.New()
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations":
_ = json.NewEncoder(w).Encode([]codersdk.Organization{
{
MinimalOrganization: codersdk.MinimalOrganization{
ID: orgID,
Name: "my-org",
DisplayName: "My Org",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
})
default:
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
}))
defer server.Close()
client := codersdk.New(must(url.Parse(server.URL)))
inv, root := clitest.New(t, "organizations", "list")
clitest.SetupConfig(t, client, root)
buf := new(bytes.Buffer)
inv.Stdout = buf
require.NoError(t, inv.Run())
require.Contains(t, buf.String(), "my-org")
require.Contains(t, buf.String(), "My Org")
require.Contains(t, buf.String(), orgID.String())
})
}
func TestOrganizationDelete(t *testing.T) {
t.Parallel()
t.Run("Yes", func(t *testing.T) {
t.Parallel()
orgID := uuid.New()
var deleteCalled atomic.Bool
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/my-org":
_ = json.NewEncoder(w).Encode(codersdk.Organization{
MinimalOrganization: codersdk.MinimalOrganization{
ID: orgID,
Name: "my-org",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
})
case r.Method == http.MethodDelete && r.URL.Path == fmt.Sprintf("/api/v2/organizations/%s", orgID.String()):
deleteCalled.Store(true)
w.WriteHeader(http.StatusOK)
default:
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
}))
defer server.Close()
client := codersdk.New(must(url.Parse(server.URL)))
inv, root := clitest.New(t, "organizations", "delete", "my-org", "--yes")
clitest.SetupConfig(t, client, root)
require.NoError(t, inv.Run())
require.True(t, deleteCalled.Load(), "expected delete request")
})
t.Run("Prompted", func(t *testing.T) {
t.Parallel()
orgID := uuid.New()
var deleteCalled atomic.Bool
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/my-org":
_ = json.NewEncoder(w).Encode(codersdk.Organization{
MinimalOrganization: codersdk.MinimalOrganization{
ID: orgID,
Name: "my-org",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
})
case r.Method == http.MethodDelete && r.URL.Path == fmt.Sprintf("/api/v2/organizations/%s", orgID.String()):
deleteCalled.Store(true)
w.WriteHeader(http.StatusOK)
default:
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
}))
defer server.Close()
client := codersdk.New(must(url.Parse(server.URL)))
inv, root := clitest.New(t, "organizations", "delete", "my-org")
clitest.SetupConfig(t, client, root)
pty := ptytest.New(t).Attach(inv)
execDone := make(chan error)
go func() {
execDone <- inv.Run()
}()
pty.ExpectMatch(fmt.Sprintf("Delete organization %s?", pretty.Sprint(cliui.DefaultStyles.Code, "my-org")))
pty.WriteLine("yes")
require.NoError(t, <-execDone)
require.True(t, deleteCalled.Load(), "expected delete request")
})
t.Run("Default", func(t *testing.T) {
t.Parallel()
orgID := uuid.New()
var deleteCalled atomic.Bool
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/default":
_ = json.NewEncoder(w).Encode(codersdk.Organization{
MinimalOrganization: codersdk.MinimalOrganization{
ID: orgID,
Name: "default",
},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
IsDefault: true,
})
case r.Method == http.MethodDelete:
deleteCalled.Store(true)
w.WriteHeader(http.StatusOK)
default:
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
}))
defer server.Close()
client := codersdk.New(must(url.Parse(server.URL)))
inv, root := clitest.New(t, "organizations", "delete", "default", "--yes")
clitest.SetupConfig(t, client, root)
err := inv.Run()
require.Error(t, err)
require.ErrorContains(t, err, "default organization")
require.False(t, deleteCalled.Load(), "expected no delete request")
})
}
func must[V any](v V, err error) V {
if err != nil {
panic(err)
-65
View File
@@ -1,65 +0,0 @@
package cli
import (
"fmt"
"time"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/pretty"
"github.com/coder/serpent"
)
func (r *RootCmd) deleteOrganization(_ *OrganizationContext) *serpent.Command {
cmd := &serpent.Command{
Use: "delete <organization_name_or_id>",
Short: "Delete an organization",
Middleware: serpent.Chain(
serpent.RequireNArgs(1),
),
Options: serpent.OptionSet{
cliui.SkipPromptOption(),
},
Handler: func(inv *serpent.Invocation) error {
client, err := r.InitClient(inv)
if err != nil {
return err
}
orgArg := inv.Args[0]
organization, err := client.OrganizationByName(inv.Context(), orgArg)
if err != nil {
return err
}
if organization.IsDefault {
return xerrors.Errorf("cannot delete the default organization %q", organization.Name)
}
_, err = cliui.Prompt(inv, cliui.PromptOptions{
Text: fmt.Sprintf("Delete organization %s?", pretty.Sprint(cliui.DefaultStyles.Code, organization.Name)),
IsConfirm: true,
Default: cliui.ConfirmNo,
})
if err != nil {
return err
}
err = client.DeleteOrganization(inv.Context(), organization.ID.String())
if err != nil {
return xerrors.Errorf("delete organization %q: %w", organization.Name, err)
}
_, _ = fmt.Fprintf(
inv.Stdout,
"Deleted organization %s at %s\n",
pretty.Sprint(cliui.DefaultStyles.Keyword, organization.Name),
cliui.Timestamp(time.Now()),
)
return nil
},
}
return cmd
}
-53
View File
@@ -1,53 +0,0 @@
package cli
import (
"fmt"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/serpent"
)
func (r *RootCmd) listOrganizations() *serpent.Command {
formatter := cliui.NewOutputFormatter(
cliui.TableFormat([]codersdk.Organization{}, []string{"name", "display name", "id", "default"}),
cliui.JSONFormat(),
)
cmd := &serpent.Command{
Use: "list",
Short: "List all organizations",
Long: "List all organizations. Requires a role which grants ResourceOrganization: read.",
Aliases: []string{"ls"},
Middleware: serpent.Chain(
serpent.RequireNArgs(0),
),
Handler: func(inv *serpent.Invocation) error {
client, err := r.InitClient(inv)
if err != nil {
return err
}
organizations, err := client.Organizations(inv.Context())
if err != nil {
return err
}
out, err := formatter.Format(inv.Context(), organizations)
if err != nil {
return err
}
if out == "" {
cliui.Infof(inv.Stderr, "No organizations found.")
return nil
}
_, err = fmt.Fprintln(inv.Stdout, out)
return err
},
}
formatter.AttachOptions(&cmd.Options)
return cmd
}
+4 -15
View File
@@ -884,27 +884,16 @@ func (o *OrganizationContext) Selected(inv *serpent.Invocation, client *codersdk
index := slices.IndexFunc(orgs, func(org codersdk.Organization) bool {
return org.Name == o.FlagSelect || org.ID.String() == o.FlagSelect
})
if index >= 0 {
return orgs[index], nil
}
// Not in membership list - try direct fetch.
// This allows site-wide admins (e.g., Owners) to use orgs they aren't
// members of.
org, err := client.OrganizationByName(inv.Context(), o.FlagSelect)
if err != nil {
if index < 0 {
var names []string
for _, org := range orgs {
names = append(names, org.Name)
}
var sdkErr *codersdk.Error
if errors.As(err, &sdkErr) && sdkErr.StatusCode() == http.StatusNotFound {
return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization? "+
"Valid options for '--org=' are [%s].", o.FlagSelect, strings.Join(names, ", "))
}
return codersdk.Organization{}, xerrors.Errorf("get organization %q: %w", o.FlagSelect, err)
return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization? "+
"Valid options for '--org=' are [%s].", o.FlagSelect, strings.Join(names, ", "))
}
return org, nil
return orgs[index], nil
}
if len(orgs) == 1 {
+2 -18
View File
@@ -95,7 +95,6 @@ import (
"github.com/coder/coder/v2/coderd/webpush"
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
"github.com/coder/coder/v2/coderd/workspacestats"
"github.com/coder/coder/v2/coderd/wsbuilder"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/drpcsdk"
"github.com/coder/coder/v2/cryptorand"
@@ -137,15 +136,6 @@ func createOIDCConfig(ctx context.Context, logger slog.Logger, vals *codersdk.De
if err != nil {
return nil, xerrors.Errorf("parse oidc oauth callback url: %w", err)
}
if vals.OIDC.RedirectURL.String() != "" {
redirectURL, err = vals.OIDC.RedirectURL.Value().Parse("/api/v2/users/oidc/callback")
if err != nil {
return nil, xerrors.Errorf("parse oidc redirect url %q", err)
}
logger.Warn(ctx, "custom OIDC redirect URL used instead of 'access_url', ensure this matches the value configured in your OIDC provider")
}
// If the scopes contain 'groups', we enable group support.
// Do not override any custom value set by the user.
if slice.Contains(vals.OIDC.Scopes, "groups") && vals.OIDC.GroupField == "" {
@@ -945,12 +935,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
options.StatsBatcher = batcher
defer closeBatcher()
wsBuilderMetrics, err := wsbuilder.NewMetrics(options.PrometheusRegistry)
if err != nil {
return xerrors.Errorf("failed to register workspace builder metrics: %w", err)
}
options.WorkspaceBuilderMetrics = wsBuilderMetrics
// Manage notifications.
var (
notificationsCfg = options.DeploymentValues.Notifications
@@ -1134,7 +1118,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
autobuildTicker := time.NewTicker(vals.AutobuildPollInterval.Value())
defer autobuildTicker.Stop()
autobuildExecutor := autobuild.NewExecutor(
ctx, options.Database, options.Pubsub, coderAPI.FileCache, options.PrometheusRegistry, coderAPI.TemplateScheduleStore, &coderAPI.Auditor, coderAPI.AccessControlStore, coderAPI.BuildUsageChecker, logger, autobuildTicker.C, options.NotificationsEnqueuer, coderAPI.Experiments, coderAPI.WorkspaceBuilderMetrics)
ctx, options.Database, options.Pubsub, coderAPI.FileCache, options.PrometheusRegistry, coderAPI.TemplateScheduleStore, &coderAPI.Auditor, coderAPI.AccessControlStore, coderAPI.BuildUsageChecker, logger, autobuildTicker.C, options.NotificationsEnqueuer, coderAPI.Experiments)
autobuildExecutor.Run()
jobReaperTicker := time.NewTicker(vals.JobReaperDetectorInterval.Value())
@@ -2190,7 +2174,7 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
// existing database
retryPortDiscovery := errors.Is(err, os.ErrNotExist) && testing.Testing()
if retryPortDiscovery {
maxAttempts = 10
maxAttempts = 3
}
var startErr error
+19 -36
View File
@@ -1740,18 +1740,6 @@ func TestServer(t *testing.T) {
// Next, we instruct the same server to display the YAML config
// and then save it.
// Because this is literally the same invocation, DefaultFn sets the
// value of 'Default', which triggers a mutually exclusive error
// on the next parse. Usually we only parse flags once, so this is
// not an issue.
for _, c := range inv.Command.Children {
if c.Name() == "server" {
for i := range c.Options {
c.Options[i].DefaultFn = nil
}
break
}
}
inv = inv.WithContext(testutil.Context(t, testutil.WaitMedium))
//nolint:gocritic
inv.Args = append(args, "--write-config")
@@ -2256,7 +2244,6 @@ type runServerOpts struct {
waitForSnapshot bool
telemetryDisabled bool
waitForTelemetryDisabledCheck bool
name string
}
func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
@@ -2279,23 +2266,25 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
"--cache-dir", cacheDir,
"--log-filter", ".*",
)
inv.Logger = inv.Logger.Named(opts.name)
finished := make(chan bool, 2)
errChan := make(chan error, 1)
pty := ptytest.New(t).Named(opts.name).Attach(inv)
pty := ptytest.New(t).Attach(inv)
go func() {
errChan <- inv.WithContext(ctx).Run()
// close the pty here so that we can start tearing down resources. This test creates multiple servers with
// associated ptys. There is a `t.Cleanup()` that does this, but it waits until the whole test is complete.
_ = pty.Close()
finished <- true
}()
if opts.waitForSnapshot {
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
}
if opts.waitForTelemetryDisabledCheck {
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
}
go func() {
defer func() {
finished <- true
}()
if opts.waitForSnapshot {
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
}
if opts.waitForTelemetryDisabledCheck {
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
}
}()
<-finished
return errChan, cancelFunc
}
waitForShutdown := func(t *testing.T, errChan chan error) error {
@@ -2309,9 +2298,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
return nil
}
errChan, cancelFunc := runServer(t, runServerOpts{
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "0disabled",
})
errChan, cancelFunc := runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
cancelFunc()
require.NoError(t, waitForShutdown(t, errChan))
@@ -2319,7 +2306,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
require.Empty(t, deployment)
require.Empty(t, snapshot)
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true, name: "1enabled"})
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true})
cancelFunc()
require.NoError(t, waitForShutdown(t, errChan))
// we expect to see a deployment and a snapshot twice:
@@ -2338,9 +2325,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
}
}
errChan, cancelFunc = runServer(t, runServerOpts{
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "2disabled",
})
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
cancelFunc()
require.NoError(t, waitForShutdown(t, errChan))
@@ -2356,9 +2341,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
t.Fatalf("timed out waiting for snapshot")
}
errChan, cancelFunc = runServer(t, runServerOpts{
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "3disabled",
})
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
cancelFunc()
require.NoError(t, waitForShutdown(t, errChan))
// Since telemetry is disabled and we've already sent a snapshot, we expect no
+58
View File
@@ -24,6 +24,7 @@ import (
"github.com/gofrs/flock"
"github.com/google/uuid"
"github.com/mattn/go-isatty"
"github.com/shirou/gopsutil/v4/process"
"github.com/spf13/afero"
gossh "golang.org/x/crypto/ssh"
gosshagent "golang.org/x/crypto/ssh/agent"
@@ -84,6 +85,9 @@ func (r *RootCmd) ssh() *serpent.Command {
containerName string
containerUser string
// Used in tests to simulate the parent exiting.
testForcePPID int64
)
cmd := &serpent.Command{
Annotations: workspaceCommand,
@@ -175,6 +179,24 @@ func (r *RootCmd) ssh() *serpent.Command {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// When running as a ProxyCommand (stdio mode), monitor the parent process
// and exit if it dies to avoid leaving orphaned processes. This is
// particularly important when editors like VSCode/Cursor spawn SSH
// connections and then crash or are killed - we don't want zombie
// `coder ssh` processes accumulating.
// Note: we use gopsutil to check the parent process because it also
// handles Windows processes in a standard way.
if stdio {
ppid := int32(os.Getppid()) // nolint:gosec
checkParentInterval := 10 * time.Second // Arbitrary interval chosen to avoid overly frequent polling
if testForcePPID > 0 {
ppid = int32(testForcePPID) // nolint:gosec
checkParentInterval = 100 * time.Millisecond // Shorter interval for testing
}
ctx, cancel = watchParentContext(ctx, quartz.NewReal(), ppid, process.PidExistsWithContext, checkParentInterval)
defer cancel()
}
// Prevent unnecessary logs from the stdlib from messing up the TTY.
// See: https://github.com/coder/coder/issues/13144
log.SetOutput(io.Discard)
@@ -775,6 +797,12 @@ func (r *RootCmd) ssh() *serpent.Command {
Value: serpent.BoolOf(&forceNewTunnel),
Hidden: true,
},
{
Flag: "test.force-ppid",
Description: "Override the parent process ID to simulate a different parent process. ONLY USE THIS IN TESTS.",
Value: serpent.Int64Of(&testForcePPID),
Hidden: true,
},
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
}
return cmd
@@ -1662,3 +1690,33 @@ func normalizeWorkspaceInput(input string) string {
return input // Fallback
}
}
// watchParentContext returns a context that is canceled when the parent process
// dies. It polls using the provided clock and checks if the parent is alive
// using the provided pidExists function.
func watchParentContext(ctx context.Context, clock quartz.Clock, originalPPID int32, pidExists func(context.Context, int32) (bool, error), interval time.Duration) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(ctx) // intentionally shadowed
go func() {
ticker := clock.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
alive, err := pidExists(ctx, originalPPID)
// If we get an error checking the parent process (e.g., permission
// denied, the process is in an unknown state), we assume the parent
// is still alive to avoid disrupting the SSH connection. We only
// cancel when we definitively know the parent is gone (alive=false, err=nil).
if !alive && err == nil {
cancel()
return
}
}
}
}()
return ctx, cancel
}
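A minimal sketch of how the watcher is wired in stdio mode, assuming gopsutil v4's process.PidExistsWithContext and the real quartz clock, both of which the imports above provide:

// Sketch: cancel ctx once the parent process is definitively gone.
ctx, cancel := watchParentContext(
	ctx,
	quartz.NewReal(),
	int32(os.Getppid()), // nolint:gosec
	process.PidExistsWithContext,
	10*time.Second,
)
defer cancel()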
+96
View File
@@ -312,6 +312,102 @@ type fakeCloser struct {
err error
}
func TestWatchParentContext(t *testing.T) {
t.Parallel()
t.Run("CancelsWhenParentDies", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitShort)
mClock := quartz.NewMock(t)
trap := mClock.Trap().NewTicker()
defer trap.Close()
parentAlive := true
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
return parentAlive, nil
}, testutil.WaitShort)
defer cancel()
// Wait for the ticker to be created
trap.MustWait(ctx).MustRelease(ctx)
// When: we simulate parent death and advance the clock
parentAlive = false
mClock.AdvanceNext()
// Then: The context should be canceled
_ = testutil.TryReceive(ctx, t, childCtx.Done())
})
t.Run("DoesNotCancelWhenParentAlive", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitShort)
mClock := quartz.NewMock(t)
trap := mClock.Trap().NewTicker()
defer trap.Close()
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
return true, nil // Parent always alive
}, testutil.WaitShort)
defer cancel()
// Wait for the ticker to be created
trap.MustWait(ctx).MustRelease(ctx)
// When: we advance the clock several times with the parent alive
for range 3 {
mClock.AdvanceNext()
}
// Then: context should not be canceled
require.NoError(t, childCtx.Err())
})
t.Run("RespectsParentContext", func(t *testing.T) {
t.Parallel()
ctx, cancelParent := context.WithCancel(context.Background())
mClock := quartz.NewMock(t)
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
return true, nil
}, testutil.WaitShort)
defer cancel()
// When: we cancel the parent context
cancelParent()
// Then: The context should be canceled
require.ErrorIs(t, childCtx.Err(), context.Canceled)
})
t.Run("DoesNotCancelOnError", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitShort)
mClock := quartz.NewMock(t)
trap := mClock.Trap().NewTicker()
defer trap.Close()
// Simulate an error checking parent status (e.g., permission denied).
// We should not cancel the context in this case to avoid disrupting
// the SSH connection.
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
return false, xerrors.New("permission denied")
}, testutil.WaitShort)
defer cancel()
// Wait for the ticker to be created
trap.MustWait(ctx).MustRelease(ctx)
// When: we advance clock several times
for range 3 {
mClock.AdvanceNext()
}
// Context should NOT be canceled since we got an error (not a definitive "not alive")
require.NoError(t, childCtx.Err(), "context was canceled even though pidExists returned an error")
})
}
func (c *fakeCloser) Close() error {
*c.closes = append(*c.closes, c)
return c.err
+101
View File
@@ -1122,6 +1122,107 @@ func TestSSH(t *testing.T) {
}
})
// This test ensures that the SSH session exits when the parent process dies.
t.Run("StdioExitOnParentDeath", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
defer cancel()
// sleepStart -> agentReady -> sessionStarted -> sleepKill -> sleepDone -> cmdDone
sleepStart := make(chan int)
agentReady := make(chan struct{})
sessionStarted := make(chan struct{})
sleepKill := make(chan struct{})
sleepDone := make(chan struct{})
// Start a sleep process which we will pretend is the parent.
go func() {
sleepCmd := exec.Command("sleep", "infinity")
if !assert.NoError(t, sleepCmd.Start(), "failed to start sleep command") {
return
}
sleepStart <- sleepCmd.Process.Pid
defer close(sleepDone)
<-sleepKill
_ = sleepCmd.Process.Kill()
_ = sleepCmd.Wait()
}()
client, workspace, agentToken := setupWorkspaceForAgent(t)
go func() {
defer close(agentReady)
_ = agenttest.New(t, client.URL, agentToken)
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).WaitFor(coderdtest.AgentsReady)
}()
clientOutput, clientInput := io.Pipe()
serverOutput, serverInput := io.Pipe()
defer func() {
for _, c := range []io.Closer{clientOutput, clientInput, serverOutput, serverInput} {
_ = c.Close()
}
}()
// Start a connection to the agent once it's ready
go func() {
<-agentReady
conn, channels, requests, err := ssh.NewClientConn(&testutil.ReaderWriterConn{
Reader: serverOutput,
Writer: clientInput,
}, "", &ssh.ClientConfig{
// #nosec
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
})
if !assert.NoError(t, err, "failed to create SSH client connection") {
return
}
defer conn.Close()
sshClient := ssh.NewClient(conn, channels, requests)
defer sshClient.Close()
session, err := sshClient.NewSession()
if !assert.NoError(t, err, "failed to create SSH session") {
return
}
close(sessionStarted)
<-sleepDone
// Ref: https://github.com/coder/internal/issues/1289
// This may return either a nil error or io.EOF.
// There is an inherent race here:
// 1. Sleep process is killed -> sleepDone is closed.
// 2. watchParentContext detects parent death, cancels context,
// causing SSH session teardown.
// 3. We receive from sleepDone and attempt to call session.Close().
// Now either:
// a. Session teardown completes before we call Close(), resulting in io.EOF.
// b. We call Close() first, resulting in a nil error.
_ = session.Close()
}()
// Wait for our "parent" process to start
sleepPid := testutil.RequireReceive(ctx, t, sleepStart)
// Wait for the agent to be ready
testutil.SoftTryReceive(ctx, t, agentReady)
inv, root := clitest.New(t, "ssh", "--stdio", workspace.Name, "--test.force-ppid", fmt.Sprintf("%d", sleepPid))
clitest.SetupConfig(t, client, root)
inv.Stdin = clientOutput
inv.Stdout = serverInput
inv.Stderr = io.Discard
// Start the command
clitest.Start(t, inv.WithContext(ctx))
// Wait for a session to be established
testutil.SoftTryReceive(ctx, t, sessionStarted)
// Now kill the fake "parent"
close(sleepKill)
// The sleep process should exit
testutil.SoftTryReceive(ctx, t, sleepDone)
// And then the command should exit. This is tracked by clitest.Start.
})
t.Run("ForwardAgent", func(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Test not supported on windows")
+7 -9
View File
@@ -1,3 +1,5 @@
//go:build !windows
package cli_test
import (
@@ -5,7 +7,6 @@ import (
"context"
"os"
"path/filepath"
"runtime"
"testing"
"time"
@@ -24,15 +25,12 @@ func setupSocketServer(t *testing.T) (path string, cleanup func()) {
t.Helper()
// Use a temporary socket path for each test
socketPath := testutil.AgentSocketPath(t)
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
// Create parent directory if needed. Not necessary on Windows because named pipes live in an abstract namespace
// not tied to any real files.
if runtime.GOOS != "windows" {
parentDir := filepath.Dir(socketPath)
err := os.MkdirAll(parentDir, 0o700)
require.NoError(t, err, "create socket directory")
}
// Create parent directory if needed
parentDir := filepath.Dir(socketPath)
err := os.MkdirAll(parentDir, 0o700)
require.NoError(t, err, "create socket directory")
server, err := agentsocket.NewServer(
slog.Make().Leveled(slog.LevelDebug),
-2
View File
@@ -17,8 +17,6 @@ func (r *RootCmd) tasksCommand() *serpent.Command {
r.taskDelete(),
r.taskList(),
r.taskLogs(),
r.taskPause(),
r.taskResume(),
r.taskSend(),
r.taskStatus(),
},
+25 -29
View File
@@ -39,15 +39,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("ByTaskName_JSON", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
userClient := client // user already has access to their own workspace
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -62,15 +62,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("ByTaskID_JSON", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
userClient := client
inv, root := clitest.New(t, "task", "logs", task.ID.String(), "--output", "json")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -85,15 +85,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("ByTaskID_Table", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
userClient := client
inv, root := clitest.New(t, "task", "logs", task.ID.String())
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -139,30 +139,29 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("ErrorFetchingLogs", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsErr(assert.AnError))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsErr(assert.AnError))
userClient := client
inv, root := clitest.New(t, "task", "logs", task.ID.String())
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.ErrorContains(t, err, assert.AnError.Error())
})
t.Run("SnapshotWithLogs_Table", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPaused, testMessages)
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPaused, testMessages)
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name)
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -172,16 +171,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("SnapshotWithLogs_JSON", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPaused, testMessages)
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPaused, testMessages)
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -196,14 +194,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("SnapshotWithoutLogs_NoSnapshotCaptured", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
userClient, task := setupCLITaskTestWithoutSnapshot(t, codersdk.TaskStatusPaused)
client, task := setupCLITaskTestWithoutSnapshot(t, codersdk.TaskStatusPaused)
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name)
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -213,6 +212,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("SnapshotWithSingleMessage", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
singleMessage := []agentapisdk.Message{
{
@@ -223,15 +223,13 @@ func Test_TaskLogs_Golden(t *testing.T) {
},
}
setupCtx := testutil.Context(t, testutil.WaitLong)
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPending, singleMessage)
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPending, singleMessage)
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name)
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -241,16 +239,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("SnapshotEmptyLogs", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusInitializing, []agentapisdk.Message{})
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusInitializing, []agentapisdk.Message{})
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name)
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
@@ -260,16 +257,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
t.Run("InitializingTaskSnapshot", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusInitializing, testMessages)
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusInitializing, testMessages)
userClient := client
inv, root := clitest.New(t, "task", "logs", task.Name)
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
-90
View File
@@ -1,90 +0,0 @@
package cli
import (
"fmt"
"time"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/pretty"
"github.com/coder/serpent"
)
func (r *RootCmd) taskPause() *serpent.Command {
cmd := &serpent.Command{
Use: "pause <task>",
Short: "Pause a task",
Long: FormatExamples(
Example{
Description: "Pause a task by name",
Command: "coder task pause my-task",
},
Example{
Description: "Pause another user's task",
Command: "coder task pause alice/my-task",
},
Example{
Description: "Pause a task without confirmation",
Command: "coder task pause my-task --yes",
},
),
Middleware: serpent.Chain(
serpent.RequireNArgs(1),
),
Options: serpent.OptionSet{
cliui.SkipPromptOption(),
},
Handler: func(inv *serpent.Invocation) error {
ctx := inv.Context()
client, err := r.InitClient(inv)
if err != nil {
return err
}
task, err := client.TaskByIdentifier(ctx, inv.Args[0])
if err != nil {
return xerrors.Errorf("resolve task %q: %w", inv.Args[0], err)
}
display := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
if task.Status == codersdk.TaskStatusPaused {
return xerrors.Errorf("task %q is already paused", display)
}
_, err = cliui.Prompt(inv, cliui.PromptOptions{
Text: fmt.Sprintf("Pause task %s?", pretty.Sprint(cliui.DefaultStyles.Code, display)),
IsConfirm: true,
Default: cliui.ConfirmNo,
})
if err != nil {
return err
}
resp, err := client.PauseTask(ctx, task.OwnerName, task.ID)
if err != nil {
return xerrors.Errorf("pause task %q: %w", display, err)
}
if resp.WorkspaceBuild == nil {
return xerrors.Errorf("pause task %q: no workspace build returned", display)
}
err = cliui.WorkspaceBuild(ctx, inv.Stdout, client, resp.WorkspaceBuild.ID)
if err != nil {
return xerrors.Errorf("watch pause build for task %q: %w", display, err)
}
_, _ = fmt.Fprintf(
inv.Stdout,
"\nThe %s task has been paused at %s!\n",
cliui.Keyword(task.Name),
cliui.Timestamp(time.Now()),
)
return nil
},
}
return cmd
}
-144
View File
@@ -1,144 +0,0 @@
package cli_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/pty/ptytest"
"github.com/coder/coder/v2/testutil"
)
func TestExpTaskPause(t *testing.T) {
t.Parallel()
t.Run("WithYesFlag", func(t *testing.T) {
t.Parallel()
// Given: A running task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
// When: We attempt to pause the task
inv, root := clitest.New(t, "task", "pause", task.Name, "--yes")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
// Then: Expect the task to be paused
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
require.Contains(t, output.Stdout(), "has been paused")
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
})
// OtherUserTask verifies that an admin can pause a task owned by
// another user using the "owner/name" identifier format.
t.Run("OtherUserTask", func(t *testing.T) {
t.Parallel()
// Given: A different user's running task
setupCtx := testutil.Context(t, testutil.WaitLong)
adminClient, _, task := setupCLITaskTest(setupCtx, t, nil)
// When: We attempt to pause their task
identifier := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
inv, root := clitest.New(t, "task", "pause", identifier, "--yes")
output := clitest.Capture(inv)
clitest.SetupConfig(t, adminClient, root)
// Then: We expect the task to be paused
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
require.Contains(t, output.Stdout(), "has been paused")
updated, err := adminClient.TaskByIdentifier(ctx, identifier)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
})
t.Run("PromptConfirm", func(t *testing.T) {
t.Parallel()
// Given: A running task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
// When: We attempt to pause the task
inv, root := clitest.New(t, "task", "pause", task.Name)
clitest.SetupConfig(t, userClient, root)
// And: We confirm we want to pause the task
ctx := testutil.Context(t, testutil.WaitMedium)
inv = inv.WithContext(ctx)
pty := ptytest.New(t).Attach(inv)
w := clitest.StartWithWaiter(t, inv)
pty.ExpectMatchContext(ctx, "Pause task")
pty.WriteLine("yes")
// Then: We expect the task to be paused
pty.ExpectMatchContext(ctx, "has been paused")
require.NoError(t, w.Wait())
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
})
t.Run("PromptDecline", func(t *testing.T) {
t.Parallel()
// Given: A running task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
// When: We attempt to pause the task
inv, root := clitest.New(t, "task", "pause", task.Name)
clitest.SetupConfig(t, userClient, root)
// But: We say no at the confirmation screen
ctx := testutil.Context(t, testutil.WaitMedium)
inv = inv.WithContext(ctx)
pty := ptytest.New(t).Attach(inv)
w := clitest.StartWithWaiter(t, inv)
pty.ExpectMatchContext(ctx, "Pause task")
pty.WriteLine("no")
require.Error(t, w.Wait())
// Then: We expect the task to not be paused
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.NotEqual(t, codersdk.TaskStatusPaused, updated.Status)
})
t.Run("TaskAlreadyPaused", func(t *testing.T) {
t.Parallel()
// Given: A running task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
// And: We paused the running task
ctx := testutil.Context(t, testutil.WaitMedium)
resp, err := userClient.PauseTask(ctx, task.OwnerName, task.ID)
require.NoError(t, err)
require.NotNil(t, resp.WorkspaceBuild)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, resp.WorkspaceBuild.ID)
// When: We attempt to pause the task again
inv, root := clitest.New(t, "task", "pause", task.Name, "--yes")
clitest.SetupConfig(t, userClient, root)
// Then: We expect to get an error that the task is already paused
err = inv.WithContext(ctx).Run()
require.ErrorContains(t, err, "is already paused")
})
}
-95
@@ -1,95 +0,0 @@
package cli
import (
"fmt"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/pretty"
"github.com/coder/serpent"
)
func (r *RootCmd) taskResume() *serpent.Command {
var noWait bool
cmd := &serpent.Command{
Use: "resume <task>",
Short: "Resume a task",
Long: FormatExamples(
Example{
Description: "Resume a task by name",
Command: "coder task resume my-task",
},
Example{
Description: "Resume another user's task",
Command: "coder task resume alice/my-task",
},
Example{
Description: "Resume a task without confirmation",
Command: "coder task resume my-task --yes",
},
),
Middleware: serpent.Chain(
serpent.RequireNArgs(1),
),
Options: serpent.OptionSet{
{
Flag: "no-wait",
Description: "Return immediately after resuming the task.",
Value: serpent.BoolOf(&noWait),
},
cliui.SkipPromptOption(),
},
Handler: func(inv *serpent.Invocation) error {
ctx := inv.Context()
client, err := r.InitClient(inv)
if err != nil {
return err
}
task, err := client.TaskByIdentifier(ctx, inv.Args[0])
if err != nil {
return xerrors.Errorf("resolve task %q: %w", inv.Args[0], err)
}
display := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
if task.Status == codersdk.TaskStatusError || task.Status == codersdk.TaskStatusUnknown {
return xerrors.Errorf("task %q is in %s state and cannot be resumed; check the workspace build logs and agent status for details", display, task.Status)
} else if task.Status != codersdk.TaskStatusPaused {
return xerrors.Errorf("task %q cannot be resumed (current status: %s)", display, task.Status)
}
_, err = cliui.Prompt(inv, cliui.PromptOptions{
Text: fmt.Sprintf("Resume task %s?", pretty.Sprint(cliui.DefaultStyles.Code, display)),
IsConfirm: true,
Default: cliui.ConfirmNo,
})
if err != nil {
return err
}
resp, err := client.ResumeTask(ctx, task.OwnerName, task.ID)
if err != nil {
return xerrors.Errorf("resume task %q: %w", display, err)
} else if resp.WorkspaceBuild == nil {
return xerrors.Errorf("resume task %q: no workspace build returned", display)
}
if noWait {
_, _ = fmt.Fprintf(inv.Stdout, "Resuming task %q in the background.\n", cliui.Keyword(display))
return nil
}
if err = cliui.WorkspaceBuild(ctx, inv.Stdout, client, resp.WorkspaceBuild.ID); err != nil {
return xerrors.Errorf("watch resume build for task %q: %w", display, err)
}
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s task has been resumed.\n", cliui.Keyword(display))
return nil
},
}
return cmd
}
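For readers driving this through the SDK instead of the CLI, here is a minimal sketch of the same resolve-check-resume flow, using only calls that appear in the handler above. The deployment URL and task identifier are illustrative, and authentication is elided; this is a sketch, not the command's implementation.
package main
import (
	"context"
	"fmt"
	"log"
	"net/url"
	"github.com/coder/coder/v2/codersdk"
)
func main() {
	ctx := context.Background()
	// Hypothetical deployment URL; a real client also needs a session token.
	base, _ := url.Parse("https://coder.example.com")
	client := codersdk.New(base)
	task, err := client.TaskByIdentifier(ctx, "alice/my-task")
	if err != nil {
		log.Fatalf("resolve task: %v", err)
	}
	// Mirror the handler's guard: only paused tasks can be resumed.
	if task.Status != codersdk.TaskStatusPaused {
		log.Fatalf("task cannot be resumed (current status: %s)", task.Status)
	}
	resp, err := client.ResumeTask(ctx, task.OwnerName, task.ID)
	if err != nil || resp.WorkspaceBuild == nil {
		log.Fatalf("resume task: %v", err)
	}
	fmt.Printf("resume build %s queued\n", resp.WorkspaceBuild.ID)
}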
-183
@@ -1,183 +0,0 @@
package cli_test
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/pty/ptytest"
"github.com/coder/coder/v2/testutil"
)
func TestExpTaskResume(t *testing.T) {
t.Parallel()
// pauseTask is a helper that pauses a task and waits for the stop
// build to complete.
pauseTask := func(ctx context.Context, t *testing.T, client *codersdk.Client, task codersdk.Task) {
t.Helper()
pauseResp, err := client.PauseTask(ctx, task.OwnerName, task.ID)
require.NoError(t, err)
require.NotNil(t, pauseResp.WorkspaceBuild)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, pauseResp.WorkspaceBuild.ID)
}
t.Run("WithYesFlag", func(t *testing.T) {
t.Parallel()
// Given: A paused task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
pauseTask(setupCtx, t, userClient, task)
// When: We attempt to resume the task
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
// Then: We expect the task to be resumed
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
require.Contains(t, output.Stdout(), "has been resumed")
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
})
// OtherUserTask verifies that an admin can resume a task owned by
// another user using the "owner/name" identifier format.
t.Run("OtherUserTask", func(t *testing.T) {
t.Parallel()
// Given: A different user's paused task
setupCtx := testutil.Context(t, testutil.WaitLong)
adminClient, userClient, task := setupCLITaskTest(setupCtx, t, nil)
pauseTask(setupCtx, t, userClient, task)
// When: We attempt to resume their task
identifier := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
inv, root := clitest.New(t, "task", "resume", identifier, "--yes")
output := clitest.Capture(inv)
clitest.SetupConfig(t, adminClient, root)
// Then: We expect the task to be resumed
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
require.Contains(t, output.Stdout(), "has been resumed")
updated, err := adminClient.TaskByIdentifier(ctx, identifier)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
})
t.Run("NoWait", func(t *testing.T) {
t.Parallel()
// Given: A paused task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
pauseTask(setupCtx, t, userClient, task)
// When: We attempt to resume the task (and specify no wait)
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes", "--no-wait")
output := clitest.Capture(inv)
clitest.SetupConfig(t, userClient, root)
// Then: We expect the task to be resumed in the background
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
require.Contains(t, output.Stdout(), "in the background")
// And: The task to eventually be resumed
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
ws := coderdtest.MustWorkspace(t, userClient, task.WorkspaceID.UUID)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, ws.LatestBuild.ID)
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
})
t.Run("PromptConfirm", func(t *testing.T) {
t.Parallel()
// Given: A paused task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
pauseTask(setupCtx, t, userClient, task)
// When: We attempt to resume the task
inv, root := clitest.New(t, "task", "resume", task.Name)
clitest.SetupConfig(t, userClient, root)
// And: We confirm we want to resume the task
ctx := testutil.Context(t, testutil.WaitMedium)
inv = inv.WithContext(ctx)
pty := ptytest.New(t).Attach(inv)
w := clitest.StartWithWaiter(t, inv)
pty.ExpectMatchContext(ctx, "Resume task")
pty.WriteLine("yes")
// Then: We expect the task to be resumed
pty.ExpectMatchContext(ctx, "has been resumed")
require.NoError(t, w.Wait())
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
})
t.Run("PromptDecline", func(t *testing.T) {
t.Parallel()
// Given: A paused task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
pauseTask(setupCtx, t, userClient, task)
// When: We attempt to resume the task
inv, root := clitest.New(t, "task", "resume", task.Name)
clitest.SetupConfig(t, userClient, root)
// But: Say no at the confirmation screen
ctx := testutil.Context(t, testutil.WaitMedium)
inv = inv.WithContext(ctx)
pty := ptytest.New(t).Attach(inv)
w := clitest.StartWithWaiter(t, inv)
pty.ExpectMatchContext(ctx, "Resume task")
pty.WriteLine("no")
require.Error(t, w.Wait())
// Then: We expect the task to still be paused
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
require.NoError(t, err)
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
})
t.Run("TaskNotPaused", func(t *testing.T) {
t.Parallel()
// Given: A running task
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
// When: We attempt to resume the task that is not paused
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes")
clitest.SetupConfig(t, userClient, root)
// Then: We expect to get an error that the task is not paused
ctx := testutil.Context(t, testutil.WaitMedium)
err := inv.WithContext(ctx).Run()
require.ErrorContains(t, err, "cannot be resumed")
})
}
+11 -12
@@ -23,41 +23,42 @@ func Test_TaskSend(t *testing.T) {
t.Run("ByTaskName_WithArgument", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "carry on with the task")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
})
t.Run("ByTaskID_WithArgument", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.ID.String(), "carry on with the task")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
})
t.Run("ByTaskName_WithStdin", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
userClient := client
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "--stdin")
@@ -65,7 +66,6 @@ func Test_TaskSend(t *testing.T) {
inv.Stdin = strings.NewReader("carry on with the task")
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
})
@@ -108,16 +108,15 @@ func Test_TaskSend(t *testing.T) {
t.Run("SendError", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
setupCtx := testutil.Context(t, testutil.WaitLong)
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
userClient, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
var stdout strings.Builder
inv, root := clitest.New(t, "task", "send", task.Name, "some task input")
inv.Stdout = &stdout
clitest.SetupConfig(t, userClient, root)
ctx := testutil.Context(t, testutil.WaitLong)
err := inv.WithContext(ctx).Run()
require.ErrorContains(t, err, assert.AnError.Error())
})
+10 -44
@@ -120,40 +120,6 @@ func Test_Tasks(t *testing.T) {
require.Equal(t, logs[2].Type, codersdk.TaskLogTypeOutput, "third message should be an output")
},
},
{
name: "pause task",
cmdArgs: []string{"task", "pause", taskName, "--yes"},
assertFn: func(stdout string, userClient *codersdk.Client) {
require.Contains(t, stdout, "has been paused", "pause output should confirm task was paused")
},
},
{
name: "get task status after pause",
cmdArgs: []string{"task", "status", taskName, "--output", "json"},
assertFn: func(stdout string, userClient *codersdk.Client) {
var task codersdk.Task
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
require.Equal(t, taskName, task.Name, "task name should match")
require.Equal(t, codersdk.TaskStatusPaused, task.Status, "task should be paused")
},
},
{
name: "resume task",
cmdArgs: []string{"task", "resume", taskName, "--yes"},
assertFn: func(stdout string, userClient *codersdk.Client) {
require.Contains(t, stdout, "has been resumed", "resume output should confirm task was resumed")
},
},
{
name: "get task status after resume",
cmdArgs: []string{"task", "status", taskName, "--output", "json"},
assertFn: func(stdout string, userClient *codersdk.Client) {
var task codersdk.Task
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
require.Equal(t, taskName, task.Name, "task name should match")
require.Equal(t, codersdk.TaskStatusInitializing, task.Status, "task should be initializing after resume")
},
},
{
name: "delete task",
cmdArgs: []string{"task", "delete", taskName, "--yes"},
@@ -272,17 +238,17 @@ func fakeAgentAPIEcho(ctx context.Context, t testing.TB, initMsg agentapisdk.Mes
// setupCLITaskTest creates a test workspace with an AI task template and agent,
// with a fake agent API configured with the provided set of handlers.
// Returns the user client and the created task.
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (ownerClient *codersdk.Client, memberClient *codersdk.Client, task codersdk.Task) {
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (*codersdk.Client, codersdk.Task) {
t.Helper()
ownerClient = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
owner := coderdtest.CreateFirstUser(t, ownerClient)
userClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
owner := coderdtest.CreateFirstUser(t, client)
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
fakeAPI := startFakeAgentAPI(t, agentAPIHandlers)
authToken := uuid.NewString()
template := createAITaskTemplate(t, ownerClient, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
template := createAITaskTemplate(t, client, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
wantPrompt := "test prompt"
task, err := userClient.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
@@ -296,17 +262,17 @@ func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[st
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
workspace, err := userClient.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, workspace.LatestBuild.ID)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
agentClient := agentsdk.New(userClient.URL, agentsdk.WithFixedToken(authToken))
_ = agenttest.New(t, userClient.URL, authToken, func(o *agent.Options) {
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(authToken))
_ = agenttest.New(t, client.URL, authToken, func(o *agent.Options) {
o.Client = agentClient
})
coderdtest.NewWorkspaceAgentWaiter(t, userClient, workspace.ID).
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).
WaitFor(coderdtest.AgentsReady)
return ownerClient, userClient, task
return userClient, task
}
// setupCLITaskTestWithSnapshot creates a task in the specified status with a log snapshot.
-4
@@ -139,10 +139,8 @@ func (r *RootCmd) templateVersionsList() *serpent.Command {
type templateVersionRow struct {
// For json format:
TemplateVersion codersdk.TemplateVersion `table:"-"`
ActiveJSON bool `json:"active" table:"-"`
// For table format:
ID string `json:"-" table:"id"`
Name string `json:"-" table:"name,default_sort"`
CreatedAt time.Time `json:"-" table:"created at"`
CreatedBy string `json:"-" table:"created by"`
@@ -168,8 +166,6 @@ func templateVersionsToRows(activeVersionID uuid.UUID, templateVersions ...coder
rows[i] = templateVersionRow{
TemplateVersion: templateVersion,
ActiveJSON: templateVersion.ID == activeVersionID,
ID: templateVersion.ID.String(),
Name: templateVersion.Name,
CreatedAt: templateVersion.CreatedAt,
CreatedBy: templateVersion.CreatedBy.Username,
-29
@@ -1,9 +1,7 @@
package cli_test
import (
"bytes"
"context"
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
@@ -42,33 +40,6 @@ func TestTemplateVersions(t *testing.T) {
pty.ExpectMatch(version.CreatedBy.Username)
pty.ExpectMatch("Active")
})
t.Run("ListVersionsJSON", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
owner := coderdtest.CreateFirstUser(t, client)
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
inv, root := clitest.New(t, "templates", "versions", "list", template.Name, "--output", "json")
clitest.SetupConfig(t, member, root)
var stdout bytes.Buffer
inv.Stdout = &stdout
require.NoError(t, inv.Run())
var rows []struct {
TemplateVersion codersdk.TemplateVersion `json:"TemplateVersion"`
Active bool `json:"active"`
}
require.NoError(t, json.Unmarshal(stdout.Bytes(), &rows))
require.Len(t, rows, 1)
assert.Equal(t, version.ID, rows[0].TemplateVersion.ID)
assert.True(t, rows[0].Active)
})
}
func TestTemplateVersionsPromote(t *testing.T) {
@@ -1,4 +1,4 @@
err: WARN: Task is pending. Showing last 1 message from snapshot.
err: WARN: Task is initializing. Showing last 1 message from snapshot.
err:
out: TYPE CONTENT
out: input Single message
-2
@@ -9,8 +9,6 @@ USAGE:
SUBCOMMANDS:
create Create a new organization.
delete Delete an organization
list List all organizations
members Manage organization members
roles Manage organization roles.
settings Manage organization settings.
-15
@@ -1,15 +0,0 @@
coder v0.0.0-devel
USAGE:
coder organizations delete [flags] <organization_name_or_id>
Delete an organization
Aliases: rm
OPTIONS:
-y, --yes bool
Bypass confirmation prompts.
———
Run `coder --help` for a list of global options.
-21
@@ -1,21 +0,0 @@
coder v0.0.0-devel
USAGE:
coder organizations list [flags]
List all organizations
Aliases: ls
List all organizations. Requires a role which grants ResourceOrganization:
read.
OPTIONS:
-c, --column [id|name|display name|icon|description|created at|updated at|default] (default: name,display name,id,default)
Columns to display in table output.
-o, --output table|json (default: table)
Output format.
———
Run `coder --help` for a list of global options.
+1 -5
@@ -383,17 +383,13 @@ NETWORKING OPTIONS:
--samesite-auth-cookie lax|none, $CODER_SAMESITE_AUTH_COOKIE (default: lax)
Controls the 'SameSite' property is set on browser session cookies.
--secure-auth-cookie bool, $CODER_SECURE_AUTH_COOKIE (default: false)
--secure-auth-cookie bool, $CODER_SECURE_AUTH_COOKIE
Controls if the 'Secure' property is set on browser session cookies.
--wildcard-access-url string, $CODER_WILDCARD_ACCESS_URL
Specifies the wildcard hostname to use for workspace applications in
the form "*.example.com".
--host-prefix-cookie bool, $CODER_HOST_PREFIX_COOKIE (default: false)
Recommended to be enabled. Enables `__Host-` prefix for cookies to
guarantee they are only set by the right domain.
NETWORKING / DERP OPTIONS:
Most Coder deployments never have to think about DERP because all connections
between workspaces and users are peer-to-peer. However, when Coder cannot
-2
@@ -12,8 +12,6 @@ SUBCOMMANDS:
delete Delete tasks
list List tasks
logs Show a task's logs
pause Pause a task
resume Resume a task
send Send input to a task
status Show the status of a task.
-25
@@ -1,25 +0,0 @@
coder v0.0.0-devel
USAGE:
coder task pause [flags] <task>
Pause a task
- Pause a task by name:
$ coder task pause my-task
- Pause another user's task:
$ coder task pause alice/my-task
- Pause a task without confirmation:
$ coder task pause my-task --yes
OPTIONS:
-y, --yes bool
Bypass confirmation prompts.
———
Run `coder --help` for a list of global options.
-28
@@ -1,28 +0,0 @@
coder v0.0.0-devel
USAGE:
coder task resume [flags] <task>
Resume a task
- Resume a task by name:
$ coder task resume my-task
- Resume another user's task:
$ coder task resume alice/my-task
- Resume a task without confirmation:
$ coder task resume my-task --yes
OPTIONS:
--no-wait bool
Return immediately after resuming the task.
-y, --yes bool
Bypass confirmation prompts.
———
Run `coder --help` for a list of global options.
+1 -1
@@ -9,7 +9,7 @@ OPTIONS:
-O, --org string, $CODER_ORGANIZATION
Select which organization (uuid or name) to use.
-c, --column [id|name|created at|created by|status|active|archived] (default: name,created at,created by,status,active)
-c, --column [name|created at|created by|status|active|archived] (default: name,created at,created by,status,active)
Columns to display in table output.
--include-archived bool
+1 -1
@@ -27,7 +27,7 @@ USAGE:
SUBCOMMANDS:
create Create a token
list List tokens
remove Expire or delete a token
remove Delete a token
view Display detailed information about a token
———
-4
@@ -15,10 +15,6 @@ OPTIONS:
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at)
Columns to display in table output.
--include-expired bool
Include expired tokens in the output. By default, expired tokens are
hidden.
-o, --output table|json (default: table)
Output format.
+2 -10
@@ -1,19 +1,11 @@
coder v0.0.0-devel
USAGE:
coder tokens remove [flags] <name|id|token>
coder tokens remove <name|id|token>
Expire or delete a token
Delete a token
Aliases: delete, rm
Remove a token by expiring it. Use --delete to permanently hard-delete the
token instead.
OPTIONS:
--delete bool
Permanently delete the token instead of expiring it. This removes the
audit trail.
———
Run `coder --help` for a list of global options.
+1 -10
@@ -176,15 +176,11 @@ networking:
# (default: <unset>, type: string-array)
proxyTrustedOrigins: []
# Controls if the 'Secure' property is set on browser session cookies.
# (default: false, type: bool)
# (default: <unset>, type: bool)
secureAuthCookie: false
# Controls the 'SameSite' property is set on browser session cookies.
# (default: lax, type: enum[lax\|none])
sameSiteAuthCookie: lax
# Recommended to be enabled. Enables `__Host-` prefix for cookies to guarantee
# they are only set by the right domain.
# (default: false, type: bool)
hostPrefixCookie: false
# Whether Coder only allows connections to workspaces via the browser.
# (default: <unset>, type: bool)
browserOnly: false
@@ -421,11 +417,6 @@ oidc:
# an insecure OIDC configuration. It is not recommended to use this flag.
# (default: <unset>, type: bool)
dangerousSkipIssuerChecks: false
# Optional override of the default redirect url which uses the deployment's access
# url. Useful in situations where a deployment has more than 1 domain. Using this
# setting can also break OIDC, so use with caution.
# (default: <unset>, type: url)
oidc-redirect-url:
# Telemetry is critical to our ability to improve Coder. We strip all personal
# information before sending data to our servers. Please only disable telemetry
# when required by your organization's security policy.
+13 -49
@@ -218,10 +218,9 @@ func (r *RootCmd) listTokens() *serpent.Command {
}
var (
all bool
includeExpired bool
displayTokens []tokenListRow
formatter = cliui.NewOutputFormatter(
all bool
displayTokens []tokenListRow
formatter = cliui.NewOutputFormatter(
cliui.TableFormat([]tokenListRow{}, defaultCols),
cliui.JSONFormat(),
)
@@ -247,20 +246,6 @@ func (r *RootCmd) listTokens() *serpent.Command {
return xerrors.Errorf("list tokens: %w", err)
}
// Filter out expired tokens unless --include-expired is set
// TODO(Cian): This _could_ get too big for client-side filtering.
// If it causes issues, we can filter server-side.
if !includeExpired {
now := time.Now()
filtered := make([]codersdk.APIKeyWithOwner, 0, len(tokens))
for _, token := range tokens {
if token.ExpiresAt.After(now) {
filtered = append(filtered, token)
}
}
tokens = filtered
}
displayTokens = make([]tokenListRow, len(tokens))
for i, token := range tokens {
@@ -289,12 +274,6 @@ func (r *RootCmd) listTokens() *serpent.Command {
Description: "Specifies whether all users' tokens will be listed or not (must have Owner role to see all tokens).",
Value: serpent.BoolOf(&all),
},
{
Name: "include-expired",
Flag: "include-expired",
Description: "Include expired tokens in the output. By default, expired tokens are hidden.",
Value: serpent.BoolOf(&includeExpired),
},
}
formatter.AttachOptions(&cmd.Options)
@@ -344,13 +323,10 @@ func (r *RootCmd) viewToken() *serpent.Command {
}
func (r *RootCmd) removeToken() *serpent.Command {
var deleteToken bool
cmd := &serpent.Command{
Use: "remove <name|id|token>",
Aliases: []string{"delete"},
Short: "Expire or delete a token",
Long: "Remove a token by expiring it. Use --delete to permanently hard-" +
"delete the token instead.",
Short: "Delete a token",
Middleware: serpent.Chain(
serpent.RequireNArgs(1),
),
@@ -362,7 +338,7 @@ func (r *RootCmd) removeToken() *serpent.Command {
token, err := client.APIKeyByName(inv.Context(), codersdk.Me, inv.Args[0])
if err != nil {
// If it's a token, we need to extract the ID.
// If it's a token, we need to extract the ID
maybeID := strings.Split(inv.Args[0], "-")[0]
token, err = client.APIKeyByID(inv.Context(), codersdk.Me, maybeID)
if err != nil {
@@ -370,29 +346,17 @@ func (r *RootCmd) removeToken() *serpent.Command {
}
}
if deleteToken {
err = client.DeleteAPIKey(inv.Context(), codersdk.Me, token.ID)
if err != nil {
return xerrors.Errorf("delete api key: %w", err)
}
cliui.Infof(inv.Stdout, "Token has been deleted.")
return nil
}
err = client.ExpireAPIKey(inv.Context(), codersdk.Me, token.ID)
err = client.DeleteAPIKey(inv.Context(), codersdk.Me, token.ID)
if err != nil {
return xerrors.Errorf("expire api key: %w", err)
return xerrors.Errorf("delete api key: %w", err)
}
cliui.Infof(inv.Stdout, "Token has been expired.")
return nil
},
}
cmd.Options = serpent.OptionSet{
{
Flag: "delete",
Description: "Permanently delete the token instead of expiring it. This removes the audit trail.",
Value: serpent.BoolOf(&deleteToken),
cliui.Infof(
inv.Stdout,
"Token has been deleted.",
)
return nil
},
}
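The name-then-ID fallback above leans on the shape of a pasted key: a full Coder token takes the form "<id>-<secret>" (stated here as an assumption for the example), so splitting on the first dash recovers the ID. A tiny illustration with a made-up key:
package main
import (
	"fmt"
	"strings"
)
func main() {
	// Hypothetical token value; only its "<id>-<secret>" shape matters.
	key := "q8rB5mXW3c-notarealsecretvalue"
	maybeID := strings.Split(key, "-")[0]
	fmt.Println(maybeID) // q8rB5mXW3c
}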
+17 -153
@@ -6,16 +6,12 @@ import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
)
@@ -26,7 +22,7 @@ func TestTokens(t *testing.T) {
adminUser := coderdtest.CreateFirstUser(t, client)
secondUserClient, secondUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
thirdUserClient, thirdUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
_, thirdUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancelFunc()
@@ -159,7 +155,7 @@ func TestTokens(t *testing.T) {
require.Len(t, scopedToken.AllowList, 1)
require.Equal(t, allowSpec, scopedToken.AllowList[0].String())
// Delete by name (default behavior is now expire)
// Delete by name
inv, root = clitest.New(t, "tokens", "rm", "token-one")
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
@@ -168,42 +164,21 @@ func TestTokens(t *testing.T) {
require.NoError(t, err)
res = buf.String()
require.NotEmpty(t, res)
require.Contains(t, res, "expired")
// Regular users cannot expire other users' tokens (expire is default now).
inv, root = clitest.New(t, "tokens", "rm", secondTokenID)
clitest.SetupConfig(t, thirdUserClient, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
err = inv.WithContext(ctx).Run()
require.Error(t, err)
require.Contains(t, err.Error(), "not found")
// Only admin users can expire other users' tokens (expire is default now).
inv, root = clitest.New(t, "tokens", "rm", secondTokenID)
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
// Validate that token was expired
if token, err := client.APIKeyByName(ctx, secondUser.ID.String(), "token-two"); assert.NoError(t, err) {
require.True(t, token.ExpiresAt.Before(time.Now()))
}
// Delete by ID (explicit delete flag)
inv, root = clitest.New(t, "tokens", "rm", "--delete", secondTokenID)
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
res = buf.String()
require.NotEmpty(t, res)
require.Contains(t, res, "deleted")
// Delete scoped token by ID (explicit delete flag)
inv, root = clitest.New(t, "tokens", "rm", "--delete", scopedTokenID)
// Delete by ID
inv, root = clitest.New(t, "tokens", "rm", secondTokenID)
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
res = buf.String()
require.NotEmpty(t, res)
require.Contains(t, res, "deleted")
// Delete scoped token by ID
inv, root = clitest.New(t, "tokens", "rm", scopedTokenID)
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
@@ -224,8 +199,8 @@ func TestTokens(t *testing.T) {
require.NotEmpty(t, res)
fourthToken := res
// Delete by token (explicit delete flag)
inv, root = clitest.New(t, "tokens", "rm", "--delete", fourthToken)
// Delete by token
inv, root = clitest.New(t, "tokens", "rm", fourthToken)
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
@@ -235,114 +210,3 @@ func TestTokens(t *testing.T) {
require.NotEmpty(t, res)
require.Contains(t, res, "deleted")
}
func TestTokensListExpiredFiltering(t *testing.T) {
t.Parallel()
client, _, api := coderdtest.NewWithAPI(t, nil)
owner := coderdtest.CreateFirstUser(t, client)
// Create a valid (non-expired) token
validToken, _ := dbgen.APIKey(t, api.Database, database.APIKey{
UserID: owner.UserID,
ExpiresAt: time.Now().Add(24 * time.Hour),
LoginType: database.LoginTypeToken,
TokenName: "valid-token",
})
// Create an expired token
expiredToken, _ := dbgen.APIKey(t, api.Database, database.APIKey{
UserID: owner.UserID,
ExpiresAt: time.Now().Add(-24 * time.Hour),
LoginType: database.LoginTypeToken,
TokenName: "expired-token",
})
t.Run("HidesExpiredByDefault", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
inv, root := clitest.New(t, "tokens", "ls")
clitest.SetupConfig(t, client, root)
buf := new(bytes.Buffer)
inv.Stdout = buf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
res := buf.String()
require.Contains(t, res, validToken.ID)
require.Contains(t, res, "valid-token")
require.NotContains(t, res, expiredToken.ID)
require.NotContains(t, res, "expired-token")
})
t.Run("ShowsExpiredWithFlag", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
inv, root := clitest.New(t, "tokens", "ls", "--include-expired")
clitest.SetupConfig(t, client, root)
buf := new(bytes.Buffer)
inv.Stdout = buf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
res := buf.String()
require.Contains(t, res, validToken.ID)
require.Contains(t, res, "valid-token")
require.Contains(t, res, expiredToken.ID)
require.Contains(t, res, "expired-token")
})
t.Run("JSONOutputRespectsFilter", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
// Default (no expired)
inv, root := clitest.New(t, "tokens", "ls", "--output=json")
clitest.SetupConfig(t, client, root)
buf := new(bytes.Buffer)
inv.Stdout = buf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
res := buf.String()
require.Contains(t, res, "valid-token")
require.NotContains(t, res, "expired-token")
// With --include-expired
inv, root = clitest.New(t, "tokens", "ls", "--output=json", "--include-expired")
clitest.SetupConfig(t, client, root)
buf = new(bytes.Buffer)
inv.Stdout = buf
err = inv.WithContext(ctx).Run()
require.NoError(t, err)
res = buf.String()
require.Contains(t, res, "valid-token")
require.Contains(t, res, "expired-token")
})
t.Run("AllUsersWithIncludeExpired", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
inv, root := clitest.New(t, "tokens", "ls", "--all", "--include-expired")
clitest.SetupConfig(t, client, root)
buf := new(bytes.Buffer)
inv.Stdout = buf
err := inv.WithContext(ctx).Run()
require.NoError(t, err)
res := buf.String()
// Should show both valid and expired tokens
require.Contains(t, res, validToken.ID)
require.Contains(t, res, "valid-token")
require.Contains(t, res, expiredToken.ID)
require.Contains(t, res, "expired-token")
})
}
+24
@@ -0,0 +1,24 @@
//go:build !windows && !darwin
package cli
import (
"golang.org/x/xerrors"
"github.com/coder/serpent"
)
func (*RootCmd) vpnDaemonRun() *serpent.Command {
cmd := &serpent.Command{
Use: "run",
Short: "Run the VPN daemon on Windows.",
Middleware: serpent.Chain(
serpent.RequireNArgs(0),
),
Handler: func(_ *serpent.Invocation) error {
return xerrors.New("vpn-daemon subcommand is not supported on this platform")
},
}
return cmd
}
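The stub above and the Windows file below form the usual Go build-tag pair: the real implementation compiles only where it is supported, while a tag-gated stub keeps the command registered elsewhere and fails at runtime. A hypothetical single-file sketch of the stub side, with the package and function names invented for the example:
//go:build !windows

// A sibling file tagged "//go:build windows" would define the same
// function with the real implementation, so callers compile everywhere.
package daemon
import "errors"
// Run fails fast on platforms the daemon does not support.
func Run() error {
	return errors.New("daemon is not supported on this platform")
}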
@@ -1,4 +1,4 @@
//go:build windows || linux
//go:build windows
package cli
@@ -11,7 +11,7 @@ import (
"github.com/coder/serpent"
)
func (*RootCmd) vpnDaemonRun() *serpent.Command {
func (r *RootCmd) vpnDaemonRun() *serpent.Command {
var (
rpcReadHandleInt int64
rpcWriteHandleInt int64
@@ -19,7 +19,7 @@ func (*RootCmd) vpnDaemonRun() *serpent.Command {
cmd := &serpent.Command{
Use: "run",
Short: "Run the VPN daemon on Windows and Linux.",
Short: "Run the VPN daemon on Windows.",
Middleware: serpent.Chain(
serpent.RequireNArgs(0),
),
@@ -53,8 +53,8 @@ func (*RootCmd) vpnDaemonRun() *serpent.Command {
return xerrors.Errorf("rpc-read-handle (%v) and rpc-write-handle (%v) must be different", rpcReadHandleInt, rpcWriteHandleInt)
}
// The manager passes the read and write descriptors directly to the
// daemon, so we can open the RPC pipe from the raw values.
// We don't need to worry about duplicating the handles on Windows,
// which is different from Unix.
logger.Info(ctx, "opening bidirectional RPC pipe", slog.F("rpc_read_handle", rpcReadHandleInt), slog.F("rpc_write_handle", rpcWriteHandleInt))
pipe, err := vpn.NewBidirectionalPipe(uintptr(rpcReadHandleInt), uintptr(rpcWriteHandleInt))
if err != nil {
@@ -62,7 +62,7 @@ func (*RootCmd) vpnDaemonRun() *serpent.Command {
}
defer pipe.Close()
logger.Info(ctx, "starting VPN tunnel")
logger.Info(ctx, "starting tunnel")
tunnel, err := vpn.NewTunnel(ctx, logger, pipe, vpn.NewClient(), vpn.UseOSNetworkingStack())
if err != nil {
return xerrors.Errorf("create new tunnel for client: %w", err)
@@ -1,19 +0,0 @@
//go:build linux
package cli_test
import (
"os"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/sys/unix"
)
func dupHandle(t *testing.T, f *os.File) uintptr {
t.Helper()
dupFD, err := unix.Dup(int(f.Fd()))
require.NoError(t, err)
return uintptr(dupFD)
}
@@ -1,33 +0,0 @@
//go:build windows
package cli_test
import (
"os"
"syscall"
"testing"
"github.com/stretchr/testify/require"
)
func dupHandle(t *testing.T, f *os.File) uintptr {
t.Helper()
src := syscall.Handle(f.Fd())
var dup syscall.Handle
proc, err := syscall.GetCurrentProcess()
require.NoError(t, err)
err = syscall.DuplicateHandle(
proc,
src,
proc,
&dup,
0,
false,
syscall.DUPLICATE_SAME_ACCESS,
)
require.NoError(t, err)
return uintptr(dup)
}
@@ -1,4 +1,4 @@
//go:build windows || linux
//go:build windows
package cli_test
@@ -67,35 +67,22 @@ func TestVPNDaemonRun(t *testing.T) {
r1, w1, err := os.Pipe()
require.NoError(t, err)
defer r1.Close()
defer w1.Close()
r2, w2, err := os.Pipe()
require.NoError(t, err)
defer r2.Close()
// The daemon closes the handles passed via NewBidirectionalPipe. Since our
// CLI tests run in-process, pass duplicated handles so we can close the
// originals without risking a double-close on FD reuse.
rpcReadHandle := dupHandle(t, r1)
rpcWriteHandle := dupHandle(t, w2)
require.NoError(t, r1.Close())
require.NoError(t, w2.Close())
defer w2.Close()
ctx := testutil.Context(t, testutil.WaitLong)
inv, _ := clitest.New(t,
"vpn-daemon",
"run",
"--rpc-read-handle",
fmt.Sprint(rpcReadHandle),
"--rpc-write-handle",
fmt.Sprint(rpcWriteHandle),
)
inv, _ := clitest.New(t, "vpn-daemon", "run", "--rpc-read-handle", fmt.Sprint(r1.Fd()), "--rpc-write-handle", fmt.Sprint(w2.Fd()))
waiter := clitest.StartWithWaiter(t, inv.WithContext(ctx))
// Send an invalid header, including a newline delimiter, so the handshake
// fails without requiring context cancellation.
_, err = w1.Write([]byte("garbage\n"))
// Send garbage which should cause the handshake to fail and the daemon
// to exit.
_, err = w1.Write([]byte("garbage"))
require.NoError(t, err)
waiter.Cancel()
err = waiter.Wait()
require.ErrorContains(t, err, "handshake failed")
})
-2
@@ -89,7 +89,6 @@ type Options struct {
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
NetworkTelemetryHandler func(batch []*tailnetproto.TelemetryEvent)
BoundaryUsageTracker *boundaryusage.Tracker
LifecycleMetrics *LifecycleMetrics
AccessURL *url.URL
AppHostname string
@@ -171,7 +170,6 @@ func New(opts Options, workspace database.Workspace) *API {
Database: opts.Database,
Log: opts.Log,
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
Metrics: opts.LifecycleMetrics,
}
api.AppsAPI = &AppsAPI{
+1 -15
@@ -4,7 +4,6 @@ import (
"context"
"database/sql"
"slices"
"sync"
"time"
"github.com/google/uuid"
@@ -32,9 +31,7 @@ type LifecycleAPI struct {
Log slog.Logger
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent, wspubsub.WorkspaceEventKind) error
TimeNowFn func() time.Time // defaults to dbtime.Now()
Metrics *LifecycleMetrics
emitMetricsOnce sync.Once
TimeNowFn func() time.Time // defaults to dbtime.Now()
}
func (a *LifecycleAPI) now() time.Time {
@@ -128,17 +125,6 @@ func (a *LifecycleAPI) UpdateLifecycle(ctx context.Context, req *agentproto.Upda
}
}
// Emit build duration metric when agent transitions to a terminal startup state.
// We only emit once per agent connection to avoid duplicate metrics.
switch lifecycleState {
case database.WorkspaceAgentLifecycleStateReady,
database.WorkspaceAgentLifecycleStateStartTimeout,
database.WorkspaceAgentLifecycleStateStartError:
a.emitMetricsOnce.Do(func() {
a.emitBuildDurationMetric(ctx, workspaceAgent.ResourceID)
})
}
return req.Lifecycle, nil
}
-260
@@ -9,14 +9,12 @@ import (
"time"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
"google.golang.org/protobuf/types/known/timestamppb"
agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/agentapi"
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmock"
"github.com/coder/coder/v2/coderd/database/dbtime"
@@ -24,10 +22,6 @@ import (
"github.com/coder/coder/v2/testutil"
)
// fullMetricName is the fully-qualified Prometheus metric name
// (namespace + name) used for gathering in tests.
const fullMetricName = "coderd_" + agentapi.BuildDurationMetricName
func TestUpdateLifecycle(t *testing.T) {
t.Parallel()
@@ -36,12 +30,6 @@ func TestUpdateLifecycle(t *testing.T) {
someTime = dbtime.Time(someTime)
now := dbtime.Now()
// Fixed times for build duration metric assertions.
// The expected duration is exactly 90 seconds.
buildCreatedAt := dbtime.Time(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC))
agentReadyAt := dbtime.Time(time.Date(2025, 1, 1, 0, 1, 30, 0, time.UTC))
expectedDuration := agentReadyAt.Sub(buildCreatedAt).Seconds() // 90.0
var (
workspaceID = uuid.New()
agentCreated = database.WorkspaceAgent{
@@ -117,19 +105,6 @@ func TestUpdateLifecycle(t *testing.T) {
Valid: true,
},
}).Return(nil)
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: buildCreatedAt,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: true,
LastAgentReadyAt: agentReadyAt,
WorstStatus: "success",
}, nil)
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
@@ -138,7 +113,6 @@ func TestUpdateLifecycle(t *testing.T) {
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
// Test that nil publish fn works.
PublishWorkspaceUpdateFn: nil,
}
@@ -148,16 +122,6 @@ func TestUpdateLifecycle(t *testing.T) {
})
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "success",
"is_prebuild": "false",
})
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
// This test jumps from CREATING to READY, skipping STARTED. Both the
@@ -183,21 +147,8 @@ func TestUpdateLifecycle(t *testing.T) {
Valid: true,
},
}).Return(nil)
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: buildCreatedAt,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: true,
LastAgentReadyAt: agentReadyAt,
WorstStatus: "success",
}, nil)
publishCalled := false
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
return agentCreated, nil
@@ -205,7 +156,6 @@ func TestUpdateLifecycle(t *testing.T) {
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
publishCalled = true
return nil
@@ -218,16 +168,6 @@ func TestUpdateLifecycle(t *testing.T) {
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
require.True(t, publishCalled)
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "success",
"is_prebuild": "false",
})
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
t.Run("NoTimeSpecified", func(t *testing.T) {
@@ -254,19 +194,6 @@ func TestUpdateLifecycle(t *testing.T) {
Valid: true,
},
})
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: buildCreatedAt,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: true,
LastAgentReadyAt: agentReadyAt,
WorstStatus: "success",
}, nil)
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
@@ -275,7 +202,6 @@ func TestUpdateLifecycle(t *testing.T) {
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: nil,
TimeNowFn: func() time.Time {
return now
@@ -287,16 +213,6 @@ func TestUpdateLifecycle(t *testing.T) {
})
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "success",
"is_prebuild": "false",
})
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
t.Run("AllStates", func(t *testing.T) {
@@ -312,9 +228,6 @@ func TestUpdateLifecycle(t *testing.T) {
dbM := dbmock.NewMockStore(gomock.NewController(t))
var publishCalled int64
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
return agent, nil
@@ -322,7 +235,6 @@ func TestUpdateLifecycle(t *testing.T) {
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
atomic.AddInt64(&publishCalled, 1)
return nil
@@ -365,20 +277,6 @@ func TestUpdateLifecycle(t *testing.T) {
ReadyAt: expectedReadyAt,
}).Times(1).Return(nil)
// The first ready state triggers the build duration metric query.
if state == agentproto.Lifecycle_READY || state == agentproto.Lifecycle_START_TIMEOUT || state == agentproto.Lifecycle_START_ERROR {
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agent.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: someTime,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: true,
LastAgentReadyAt: stateNow,
WorstStatus: "success",
}, nil).MaxTimes(1)
}
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
Lifecycle: lifecycle,
})
@@ -424,164 +322,6 @@ func TestUpdateLifecycle(t *testing.T) {
require.Nil(t, resp)
require.False(t, publishCalled)
})
// Test that metric is NOT emitted when not all agents are ready (multi-agent case).
t.Run("MetricNotEmittedWhenNotAllAgentsReady", func(t *testing.T) {
t.Parallel()
lifecycle := &agentproto.Lifecycle{
State: agentproto.Lifecycle_READY,
ChangedAt: timestamppb.New(now),
}
dbM := dbmock.NewMockStore(gomock.NewController(t))
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
// Return AllAgentsReady = false to simulate multi-agent case where not all are ready.
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: someTime,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: false, // Not all agents ready yet
LastAgentReadyAt: time.Time{}, // No ready time yet
WorstStatus: "success",
}, nil)
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
return agentStarting, nil
},
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: nil,
}
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
Lifecycle: lifecycle,
})
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
require.Nil(t, promhelp.MetricValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "success",
"is_prebuild": "false",
}), "metric should not be emitted when not all agents are ready")
})
// Test that prebuild label is "true" when owner is prebuild system user.
t.Run("PrebuildLabelTrue", func(t *testing.T) {
t.Parallel()
lifecycle := &agentproto.Lifecycle{
State: agentproto.Lifecycle_READY,
ChangedAt: timestamppb.New(now),
}
dbM := dbmock.NewMockStore(gomock.NewController(t))
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: buildCreatedAt,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: true, // Prebuild workspace
AllAgentsReady: true,
LastAgentReadyAt: agentReadyAt,
WorstStatus: "success",
}, nil)
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
return agentStarting, nil
},
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: nil,
}
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
Lifecycle: lifecycle,
})
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "success",
"is_prebuild": "true",
})
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
// Test worst status is used when one agent has an error.
t.Run("WorstStatusError", func(t *testing.T) {
t.Parallel()
lifecycle := &agentproto.Lifecycle{
State: agentproto.Lifecycle_READY,
ChangedAt: timestamppb.New(now),
}
dbM := dbmock.NewMockStore(gomock.NewController(t))
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
CreatedAt: buildCreatedAt,
Transition: database.WorkspaceTransitionStart,
TemplateName: "test-template",
OrganizationName: "test-org",
IsPrebuild: false,
AllAgentsReady: true,
LastAgentReadyAt: agentReadyAt,
WorstStatus: "error", // One agent had an error
}, nil)
reg := prometheus.NewRegistry()
metrics := agentapi.NewLifecycleMetrics(reg)
api := &agentapi.LifecycleAPI{
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
return agentStarting, nil
},
WorkspaceID: workspaceID,
Database: dbM,
Log: testutil.Logger(t),
Metrics: metrics,
PublishWorkspaceUpdateFn: nil,
}
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
Lifecycle: lifecycle,
})
require.NoError(t, err)
require.Equal(t, lifecycle, resp)
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
"template_name": "test-template",
"organization_name": "test-org",
"transition": "start",
"status": "error",
"is_prebuild": "false",
})
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
}
func TestUpdateStartup(t *testing.T) {
-6
@@ -249,17 +249,11 @@ func dbAppToProto(dbApp database.WorkspaceApp, agent database.WorkspaceAgent, ow
func dbAgentDevcontainersToProto(devcontainers []database.WorkspaceAgentDevcontainer) []*agentproto.WorkspaceAgentDevcontainer {
ret := make([]*agentproto.WorkspaceAgentDevcontainer, len(devcontainers))
for i, dc := range devcontainers {
var subagentID []byte
if dc.SubagentID.Valid {
subagentID = dc.SubagentID.UUID[:]
}
ret[i] = &agentproto.WorkspaceAgentDevcontainer{
Id: dc.ID[:],
Name: dc.Name,
WorkspaceFolder: dc.WorkspaceFolder,
ConfigPath: dc.ConfigPath,
SubagentId: subagentID,
}
}
return ret
-97
@@ -1,97 +0,0 @@
package agentapi
import (
"context"
"strconv"
"time"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"cdr.dev/slog/v3"
)
// BuildDurationMetricName is the short name for the end-to-end
// workspace build duration histogram. The full metric name is
// prefixed with the namespace "coderd_".
const BuildDurationMetricName = "template_workspace_build_duration_seconds"
// LifecycleMetrics contains Prometheus metrics for the lifecycle API.
type LifecycleMetrics struct {
BuildDuration *prometheus.HistogramVec
}
// NewLifecycleMetrics creates and registers all lifecycle-related
// Prometheus metrics.
//
// The build duration histogram tracks the end-to-end duration from
// workspace build creation to agent ready, by template. It is
// recorded by the coderd replica handling the agent's connection
// when the last agent reports ready. In multi-replica deployments,
// each replica only has observations for agents it handles.
//
// The "is_prebuild" label distinguishes prebuild creation (background,
// no user waiting) from user-initiated builds (regular workspace
// creation or prebuild claims).
func NewLifecycleMetrics(reg prometheus.Registerer) *LifecycleMetrics {
m := &LifecycleMetrics{
BuildDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "coderd",
Name: BuildDurationMetricName,
Help: "Duration from workspace build creation to agent ready, by template.",
Buckets: []float64{
1, // 1s
10,
30,
60, // 1min
60 * 5,
60 * 10,
60 * 30, // 30min
60 * 60, // 1hr
},
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: time.Hour,
}, []string{"template_name", "organization_name", "transition", "status", "is_prebuild"}),
}
reg.MustRegister(m.BuildDuration)
return m
}
// emitBuildDurationMetric records the end-to-end workspace build
// duration from build creation to when all agents are ready.
func (a *LifecycleAPI) emitBuildDurationMetric(ctx context.Context, resourceID uuid.UUID) {
if a.Metrics == nil {
return
}
buildInfo, err := a.Database.GetWorkspaceBuildMetricsByResourceID(ctx, resourceID)
if err != nil {
a.Log.Warn(ctx, "failed to get build info for metrics", slog.Error(err))
return
}
// Wait until all agents have reached a terminal startup state.
if !buildInfo.AllAgentsReady {
return
}
// LastAgentReadyAt is the MAX(ready_at) across all agents. Since
// we only get here when AllAgentsReady is true, this should always
// be valid.
if buildInfo.LastAgentReadyAt.IsZero() {
a.Log.Warn(ctx, "last_agent_ready_at is unexpectedly zero",
slog.F("last_agent_ready_at", buildInfo.LastAgentReadyAt))
return
}
duration := buildInfo.LastAgentReadyAt.Sub(buildInfo.CreatedAt).Seconds()
a.Metrics.BuildDuration.WithLabelValues(
buildInfo.TemplateName,
buildInfo.OrganizationName,
string(buildInfo.Transition),
buildInfo.WorstStatus,
strconv.FormatBool(buildInfo.IsPrebuild),
).Observe(duration)
}
+20 -57
@@ -37,6 +37,25 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
//nolint:gocritic // This gives us only the permissions required to do the job.
ctx = dbauthz.AsSubAgentAPI(ctx, a.OrganizationID, a.OwnerID)
parentAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, xerrors.Errorf("get parent agent: %w", err)
}
agentName := req.Name
if agentName == "" {
return nil, codersdk.ValidationError{
Field: "name",
Detail: "agent name cannot be empty",
}
}
if !provisioner.AgentNameRegex.MatchString(agentName) {
return nil, codersdk.ValidationError{
Field: "name",
Detail: fmt.Sprintf("agent name %q does not match regex %q", agentName, provisioner.AgentNameRegex),
}
}
createdAt := a.Clock.Now()
displayApps := make([]database.DisplayApp, 0, len(req.DisplayApps))
@@ -64,62 +83,6 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
displayApps = append(displayApps, app)
}
parentAgent, err := a.AgentFn(ctx)
if err != nil {
return nil, xerrors.Errorf("get parent agent: %w", err)
}
// An ID is only given in the request when it is a terraform-defined devcontainer
// that has attached resources. These subagents are pre-provisioned by terraform
// (the agent record already exists), so we update configurable fields like
// display_apps rather than creating a new agent.
if req.Id != nil {
id, err := uuid.FromBytes(req.Id)
if err != nil {
return nil, xerrors.Errorf("parse agent id: %w", err)
}
subAgent, err := a.Database.GetWorkspaceAgentByID(ctx, id)
if err != nil {
return nil, xerrors.Errorf("get workspace agent by id: %w", err)
}
// Validate that the subagent belongs to the current parent agent to
// prevent updating subagents from other agents within the same workspace.
if !subAgent.ParentID.Valid || subAgent.ParentID.UUID != parentAgent.ID {
return nil, xerrors.Errorf("subagent does not belong to this parent agent")
}
if err := a.Database.UpdateWorkspaceAgentDisplayAppsByID(ctx, database.UpdateWorkspaceAgentDisplayAppsByIDParams{
ID: id,
DisplayApps: displayApps,
UpdatedAt: createdAt,
}); err != nil {
return nil, xerrors.Errorf("update workspace agent display apps: %w", err)
}
return &agentproto.CreateSubAgentResponse{
Agent: &agentproto.SubAgent{
Name: subAgent.Name,
Id: subAgent.ID[:],
AuthToken: subAgent.AuthToken[:],
},
}, nil
}
agentName := req.Name
if agentName == "" {
return nil, codersdk.ValidationError{
Field: "name",
Detail: "agent name cannot be empty",
}
}
if !provisioner.AgentNameRegex.MatchString(agentName) {
return nil, codersdk.ValidationError{
Field: "name",
Detail: fmt.Sprintf("agent name %q does not match regex %q", agentName, provisioner.AgentNameRegex),
}
}
subAgent, err := a.Database.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{
ID: uuid.New(),
ParentID: uuid.NullUUID{Valid: true, UUID: parentAgent.ID},
@@ -128,7 +91,7 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
Name: agentName,
ResourceID: parentAgent.ResourceID,
AuthToken: uuid.New(),
AuthInstanceID: sql.NullString{},
AuthInstanceID: parentAgent.AuthInstanceID,
Architecture: req.Architecture,
EnvironmentVariables: pqtype.NullRawMessage{},
OperatingSystem: req.OperatingSystem,
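To make the two code paths concrete, here is a sketch of both request shapes using the proto types from this file; all field values are illustrative:

	// Insert path: no Id, so the name is validated against
	// provisioner.AgentNameRegex and a fresh agent row is inserted
	// under the parent.
	createReq := &agentproto.CreateSubAgentRequest{
		Name:            "devcontainer",
		Directory:       "/workspaces/app",
		Architecture:    "amd64",
		OperatingSystem: "linux",
	}

	// Update path: an Id of a terraform-pre-provisioned subagent
	// short-circuits creation; only display apps are rewritten.
	existingID := uuid.New()
	updateReq := &agentproto.CreateSubAgentRequest{
		Id: existingID[:],
		DisplayApps: []agentproto.CreateSubAgentRequest_DisplayApp{
			agentproto.CreateSubAgentRequest_WEB_TERMINAL,
		},
	}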
-264
View File
@@ -175,52 +175,6 @@ func TestSubAgentAPI(t *testing.T) {
}
})
// Context: https://github.com/coder/coder/pull/22196
t.Run("CreateSubAgentDoesNotInheritAuthInstanceID", func(t *testing.T) {
t.Parallel()
var (
log = testutil.Logger(t)
clock = quartz.NewMock(t)
db, org = newDatabaseWithOrg(t)
user, agent = newUserWithWorkspaceAgent(t, db, org)
)
// Given: The parent agent has an AuthInstanceID set
ctx := testutil.Context(t, testutil.WaitShort)
parentAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agent.ID)
require.NoError(t, err)
require.True(t, parentAgent.AuthInstanceID.Valid, "parent agent should have an AuthInstanceID")
require.NotEmpty(t, parentAgent.AuthInstanceID.String)
api := newAgentAPI(t, log, db, clock, user, org, agent)
// When: We create a sub agent
createResp, err := api.CreateSubAgent(ctx, &proto.CreateSubAgentRequest{
Name: "sub-agent",
Directory: "/workspaces/test",
Architecture: "amd64",
OperatingSystem: "linux",
})
require.NoError(t, err)
subAgentID, err := uuid.FromBytes(createResp.Agent.Id)
require.NoError(t, err)
// Then: The sub-agent must NOT re-use the parent's AuthInstanceID.
subAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), subAgentID)
require.NoError(t, err)
assert.False(t, subAgent.AuthInstanceID.Valid, "sub-agent should not have an AuthInstanceID")
assert.Empty(t, subAgent.AuthInstanceID.String, "sub-agent AuthInstanceID string should be empty")
// Double-check: looking up by the parent's instance ID must
// still return the parent, not the sub-agent.
lookedUp, err := db.GetWorkspaceAgentByInstanceID(dbauthz.AsSystemRestricted(ctx), parentAgent.AuthInstanceID.String)
require.NoError(t, err)
assert.Equal(t, parentAgent.ID, lookedUp.ID, "instance ID lookup should still return the parent agent")
})
type expectedAppError struct {
index int32
field string
@@ -1178,224 +1132,6 @@ func TestSubAgentAPI(t *testing.T) {
require.Equal(t, "Custom App", apps[0].DisplayName)
})
t.Run("CreateSubAgentUpdatesExisting", func(t *testing.T) {
t.Parallel()
baseChildAgent := database.WorkspaceAgent{
Name: "existing-child-agent",
Directory: "/workspaces/test",
Architecture: "amd64",
OperatingSystem: "linux",
DisplayApps: []database.DisplayApp{database.DisplayAppVscode},
}
type testCase struct {
name string
setup func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest
wantErr string
check func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent)
}
tests := []testCase{
{
name: "OK",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// Given: An existing child agent with some display apps.
childAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
ParentID: uuid.NullUUID{Valid: true, UUID: agent.ID},
ResourceID: agent.ResourceID,
Name: baseChildAgent.Name,
Directory: baseChildAgent.Directory,
Architecture: baseChildAgent.Architecture,
OperatingSystem: baseChildAgent.OperatingSystem,
DisplayApps: baseChildAgent.DisplayApps,
})
// When: We call CreateSubAgent with the existing agent's ID and new display apps.
return &proto.CreateSubAgentRequest{
Id: childAgent.ID[:],
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
proto.CreateSubAgentRequest_WEB_TERMINAL,
proto.CreateSubAgentRequest_SSH_HELPER,
},
}
},
check: func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent) {
// Then: The response contains the existing agent's details.
require.NotNil(t, resp.Agent)
require.Equal(t, baseChildAgent.Name, resp.Agent.Name)
agentID, err := uuid.FromBytes(resp.Agent.Id)
require.NoError(t, err)
// And: The database agent's display apps are updated.
updatedAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agentID)
require.NoError(t, err)
require.Len(t, updatedAgent.DisplayApps, 2)
require.Contains(t, updatedAgent.DisplayApps, database.DisplayAppWebTerminal)
require.Contains(t, updatedAgent.DisplayApps, database.DisplayAppSSHHelper)
},
},
{
name: "OK_OtherFieldsNotModified",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// Given: An existing child agent with specific properties.
childAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
ParentID: uuid.NullUUID{Valid: true, UUID: agent.ID},
ResourceID: agent.ResourceID,
Name: baseChildAgent.Name,
Directory: baseChildAgent.Directory,
Architecture: baseChildAgent.Architecture,
OperatingSystem: baseChildAgent.OperatingSystem,
DisplayApps: baseChildAgent.DisplayApps,
})
// When: We call CreateSubAgent with different values for name, directory, arch, and OS.
return &proto.CreateSubAgentRequest{
Id: childAgent.ID[:],
Name: "different-name",
Directory: "/different/path",
Architecture: "arm64",
OperatingSystem: "darwin",
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
proto.CreateSubAgentRequest_WEB_TERMINAL,
},
}
},
check: func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent) {
// Then: The response contains the original agent name, not the new one.
require.NotNil(t, resp.Agent)
require.Equal(t, baseChildAgent.Name, resp.Agent.Name)
agentID, err := uuid.FromBytes(resp.Agent.Id)
require.NoError(t, err)
// And: The database agent's other fields are unchanged.
updatedAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agentID)
require.NoError(t, err)
require.Equal(t, baseChildAgent.Name, updatedAgent.Name)
require.Equal(t, baseChildAgent.Directory, updatedAgent.Directory)
require.Equal(t, baseChildAgent.Architecture, updatedAgent.Architecture)
require.Equal(t, baseChildAgent.OperatingSystem, updatedAgent.OperatingSystem)
// But display apps should be updated.
require.Len(t, updatedAgent.DisplayApps, 1)
require.Equal(t, database.DisplayAppWebTerminal, updatedAgent.DisplayApps[0])
},
},
{
name: "Error/MalformedID",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// When: We call CreateSubAgent with malformed ID bytes (not 16 bytes).
// uuid.FromBytes requires exactly 16 bytes, so we provide fewer.
return &proto.CreateSubAgentRequest{
Id: []byte("short"),
}
},
wantErr: "parse agent id",
},
{
name: "Error/AgentNotFound",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// When: We call CreateSubAgent with a non-existent agent ID.
nonExistentID := uuid.New()
return &proto.CreateSubAgentRequest{
Id: nonExistentID[:],
}
},
wantErr: "get workspace agent by id",
},
{
name: "Error/ParentMismatch",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// Create a second agent (sibling) within the same workspace/resource.
// This sibling has a different parent ID (or no parent).
siblingAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
ParentID: uuid.NullUUID{Valid: false}, // No parent - it's a top-level agent
ResourceID: agent.ResourceID,
Name: "sibling-agent",
Directory: "/workspaces/sibling",
Architecture: "amd64",
OperatingSystem: "linux",
})
// Create a child of the sibling agent (not our agent).
childOfSibling := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
ParentID: uuid.NullUUID{Valid: true, UUID: siblingAgent.ID},
ResourceID: agent.ResourceID,
Name: "child-of-sibling",
Directory: "/workspaces/test",
Architecture: "amd64",
OperatingSystem: "linux",
})
// When: Our API (which is for `agent`) tries to update the child of `siblingAgent`.
return &proto.CreateSubAgentRequest{
Id: childOfSibling.ID[:],
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
proto.CreateSubAgentRequest_VSCODE,
},
}
},
wantErr: "subagent does not belong to this parent agent",
},
{
name: "Error/NoParentID",
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
// Given: An agent without a parent (a top-level agent).
topLevelAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
ParentID: uuid.NullUUID{Valid: false}, // No parent
ResourceID: agent.ResourceID,
Name: "top-level-agent",
Directory: "/workspaces/test",
Architecture: "amd64",
OperatingSystem: "linux",
})
// When: We try to update this agent as if it were a subagent.
return &proto.CreateSubAgentRequest{
Id: topLevelAgent.ID[:],
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
proto.CreateSubAgentRequest_VSCODE,
},
}
},
wantErr: "subagent does not belong to this parent agent",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
var (
log = testutil.Logger(t)
clock = quartz.NewMock(t)
db, org = newDatabaseWithOrg(t)
user, agent = newUserWithWorkspaceAgent(t, db, org)
api = newAgentAPI(t, log, db, clock, user, org, agent)
)
req := tc.setup(t, db, agent)
ctx := testutil.Context(t, testutil.WaitShort)
resp, err := api.CreateSubAgent(ctx, req)
if tc.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.wantErr)
return
}
require.NoError(t, err)
if tc.check != nil {
tc.check(t, ctx, db, resp, agent)
}
})
}
})
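The MalformedID case above hinges on uuid.FromBytes requiring exactly 16 bytes; a standalone sketch with google/uuid (error text approximate):

	if _, err := uuid.FromBytes([]byte("short")); err != nil {
		fmt.Println(err) // invalid UUID (got 5 bytes)
	}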
t.Run("ListSubAgents", func(t *testing.T) {
t.Parallel()
+4 -203
View File
@@ -21,12 +21,10 @@ import (
agentapisdk "github.com/coder/agentapi-sdk-go"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpapi/httperror"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/notifications"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
"github.com/coder/coder/v2/coderd/searchquery"
@@ -979,27 +977,10 @@ func (api *API) authAndDoWithTaskAppClient(
ctx := r.Context()
if task.Status != database.TaskStatusActive {
// Return 409 Conflict for valid requests blocked by current state
// (pending/initializing are transitional, paused requires resume).
// Return 400 Bad Request for error/unknown states.
switch task.Status {
case database.TaskStatusPending, database.TaskStatusInitializing:
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
Message: fmt.Sprintf("Task is %s.", task.Status),
Detail: "The task is resuming. Wait for the task to become active before sending messages.",
})
case database.TaskStatusPaused:
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
Message: "Task is paused.",
Detail: "Resume the task to send messages.",
})
default:
// Default handler for error and unknown status.
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
Message: "Task must be active.",
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
})
}
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
Message: "Task status must be active.",
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
})
}
if !task.WorkspaceID.Valid {
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
@@ -1246,183 +1227,3 @@ func (api *API) postWorkspaceAgentTaskLogSnapshot(rw http.ResponseWriter, r *htt
rw.WriteHeader(http.StatusNoContent)
}
// @Summary Pause task
// @ID pause-task
// @Security CoderSessionToken
// @Accept json
// @Tags Tasks
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
// @Param task path string true "Task ID" format(uuid)
// @Success 202 {object} codersdk.PauseTaskResponse
// @Router /tasks/{user}/{task}/pause [post]
func (api *API) pauseTask(rw http.ResponseWriter, r *http.Request) {
var (
ctx = r.Context()
apiKey = httpmw.APIKey(r)
task = httpmw.TaskParam(r)
)
if !task.WorkspaceID.Valid {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Task does not have a workspace.",
})
return
}
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
if err != nil {
if httpapi.Is404Error(err) {
httpapi.ResourceNotFound(rw)
return
}
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching task workspace.",
Detail: err.Error(),
})
return
}
buildReq := codersdk.CreateWorkspaceBuildRequest{
Transition: codersdk.WorkspaceTransitionStop,
Reason: codersdk.CreateWorkspaceBuildReasonTaskManualPause,
}
build, err := api.postWorkspaceBuildsInternal(
ctx,
apiKey,
workspace,
buildReq,
func(action policy.Action, object rbac.Objecter) bool {
return api.Authorize(r, action, object)
},
audit.WorkspaceBuildBaggageFromRequest(r),
)
if err != nil {
httperror.WriteWorkspaceBuildError(ctx, rw, err)
return
}
if _, err := api.NotificationsEnqueuer.Enqueue(
// nolint:gocritic // Need notifier actor to enqueue notifications.
dbauthz.AsNotifier(ctx),
workspace.OwnerID,
notifications.TemplateTaskPaused,
map[string]string{
"task": task.Name,
"task_id": task.ID.String(),
"workspace": workspace.Name,
"pause_reason": "manual",
},
"api-task-pause",
workspace.ID, workspace.OwnerID, workspace.OrganizationID,
); err != nil {
api.Logger.Warn(ctx, "failed to notify of task paused", slog.Error(err), slog.F("task_id", task.ID), slog.F("workspace_id", workspace.ID))
}
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.PauseTaskResponse{
WorkspaceBuild: &build,
})
}
// @Summary Resume task
// @ID resume-task
// @Security CoderSessionToken
// @Accept json
// @Tags Tasks
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
// @Param task path string true "Task ID" format(uuid)
// @Success 202 {object} codersdk.ResumeTaskResponse
// @Router /tasks/{user}/{task}/resume [post]
func (api *API) resumeTask(rw http.ResponseWriter, r *http.Request) {
var (
ctx = r.Context()
apiKey = httpmw.APIKey(r)
task = httpmw.TaskParam(r)
)
if !task.WorkspaceID.Valid {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Task does not have a workspace.",
})
return
}
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
if err != nil {
if httpapi.Is404Error(err) {
httpapi.ResourceNotFound(rw)
return
}
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching task workspace.",
Detail: err.Error(),
})
return
}
latestBuild, err := api.Database.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching task workspace build.",
Detail: err.Error(),
})
return
}
job, err := api.Database.GetProvisionerJobByID(ctx, latestBuild.JobID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error fetching task workspace build job.",
Detail: err.Error(),
})
return
}
workspaceStatus := codersdk.ConvertWorkspaceStatus(
codersdk.ProvisionerJobStatus(job.JobStatus),
codersdk.WorkspaceTransition(latestBuild.Transition),
)
if workspaceStatus == codersdk.WorkspaceStatusRunning {
httpapi.Write(ctx, rw, http.StatusConflict, codersdk.Response{
Message: "Task workspace is already running.",
Detail: fmt.Sprintf("Workspace status is %q.", workspaceStatus),
})
return
}
buildReq := codersdk.CreateWorkspaceBuildRequest{
Transition: codersdk.WorkspaceTransitionStart,
Reason: codersdk.CreateWorkspaceBuildReasonTaskResume,
}
build, err := api.postWorkspaceBuildsInternal(
ctx,
apiKey,
workspace,
buildReq,
func(action policy.Action, object rbac.Objecter) bool {
return api.Authorize(r, action, object)
},
audit.WorkspaceBuildBaggageFromRequest(r),
)
if err != nil {
httperror.WriteWorkspaceBuildError(ctx, rw, err)
return
}
if _, err := api.NotificationsEnqueuer.Enqueue(
// nolint:gocritic // Need notifier actor to enqueue notifications.
dbauthz.AsNotifier(ctx),
workspace.OwnerID,
notifications.TemplateTaskResumed,
map[string]string{
"task": task.Name,
"task_id": task.ID.String(),
"workspace": workspace.Name,
},
"api-task-resume",
workspace.ID, workspace.OwnerID, workspace.OrganizationID,
); err != nil {
api.Logger.Warn(ctx, "failed to notify of task resumed", slog.Error(err), slog.F("task_id", task.ID), slog.F("workspace_id", workspace.ID))
}
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.ResumeTaskResponse{
WorkspaceBuild: &build,
})
}
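The already-running guard in the resumeTask handler above derives workspace status purely from the latest build's job status and transition. A sketch of the same derivation with literal values (string casts so the snippet stands alone):

	status := codersdk.ConvertWorkspaceStatus(
		codersdk.ProvisionerJobStatus("succeeded"),
		codersdk.WorkspaceTransition("start"),
	)
	// A succeeded start build means a running workspace, the one case
	// the handler rejects with 409 Conflict.
	fmt.Println(status == codersdk.WorkspaceStatusRunning) // true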
+77 -1077
View File
File diff suppressed because it is too large
+19 -294
View File
@@ -3745,69 +3745,6 @@ const docTemplate = `{
}
}
},
"/organizations/{organization}/members/{user}/workspaces/available-users": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": [
"application/json"
],
"tags": [
"Workspaces"
],
"summary": "Get users available for workspace creation",
"operationId": "get-users-available-for-workspace-creation",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID",
"name": "organization",
"in": "path",
"required": true
},
{
"type": "string",
"description": "User ID, name, or me",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Search query",
"name": "q",
"in": "query"
},
{
"type": "integer",
"description": "Limit results",
"name": "limit",
"in": "query"
},
{
"type": "integer",
"description": "Offset for pagination",
"name": "offset",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/codersdk.MinimalUser"
}
}
}
}
}
},
"/organizations/{organization}/paginated-members": {
"get": {
"security": [
@@ -5887,90 +5824,6 @@ const docTemplate = `{
}
}
},
"/tasks/{user}/{task}/pause": {
"post": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"tags": [
"Tasks"
],
"summary": "Pause task",
"operationId": "pause-task",
"parameters": [
{
"type": "string",
"description": "Username, user ID, or 'me' for the authenticated user",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "uuid",
"description": "Task ID",
"name": "task",
"in": "path",
"required": true
}
],
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/codersdk.PauseTaskResponse"
}
}
}
}
},
"/tasks/{user}/{task}/resume": {
"post": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"tags": [
"Tasks"
],
"summary": "Resume task",
"operationId": "resume-task",
"parameters": [
{
"type": "string",
"description": "Username, user ID, or 'me' for the authenticated user",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "uuid",
"description": "Task ID",
"name": "task",
"in": "path",
"required": true
}
],
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/codersdk.ResumeTaskResponse"
}
}
}
}
},
"/tasks/{user}/{task}/send": {
"post": {
"security": [
@@ -8449,54 +8302,6 @@ const docTemplate = `{
}
}
},
"/users/{user}/keys/{keyid}/expire": {
"put": {
"security": [
{
"CoderSessionToken": []
}
],
"tags": [
"Users"
],
"summary": "Expire API key",
"operationId": "expire-api-key",
"parameters": [
{
"type": "string",
"description": "User ID, name, or me",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "string",
"description": "Key ID",
"name": "keyid",
"in": "path",
"required": true
}
],
"responses": {
"204": {
"description": "No Content"
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/codersdk.Response"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/codersdk.Response"
}
}
}
}
},
"/users/{user}/login-type": {
"get": {
"security": [
@@ -11122,7 +10927,7 @@ const docTemplate = `{
"parameters": [
{
"type": "string",
"description": "Search query in the format ` + "`" + `key:value` + "`" + `. Available keys are: owner, template, name, status, has-agent, dormant, last_used_after, last_used_before, has-ai-task, has_external_agent, healthy.",
"description": "Search query in the format ` + "`" + `key:value` + "`" + `. Available keys are: owner, template, name, status, has-agent, dormant, last_used_after, last_used_before, has-ai-task, has_external_agent.",
"name": "q",
"in": "query"
},
@@ -12414,9 +12219,6 @@ const docTemplate = `{
"api_key_id": {
"type": "string"
},
"client": {
"type": "string"
},
"ended_at": {
"type": "string",
"format": "date-time"
@@ -12896,7 +12698,6 @@ const docTemplate = `{
"workspace:start",
"workspace:stop",
"workspace:update",
"workspace:update_agent",
"workspace_agent_devcontainers:*",
"workspace_agent_devcontainers:create",
"workspace_agent_resource_monitor:*",
@@ -12915,7 +12716,6 @@ const docTemplate = `{
"workspace_dormant:start",
"workspace_dormant:stop",
"workspace_dormant:update",
"workspace_dormant:update_agent",
"workspace_proxy:*",
"workspace_proxy:create",
"workspace_proxy:delete",
@@ -13100,7 +12900,6 @@ const docTemplate = `{
"APIKeyScopeWorkspaceStart",
"APIKeyScopeWorkspaceStop",
"APIKeyScopeWorkspaceUpdate",
"APIKeyScopeWorkspaceUpdateAgent",
"APIKeyScopeWorkspaceAgentDevcontainersAll",
"APIKeyScopeWorkspaceAgentDevcontainersCreate",
"APIKeyScopeWorkspaceAgentResourceMonitorAll",
@@ -13119,7 +12918,6 @@ const docTemplate = `{
"APIKeyScopeWorkspaceDormantStart",
"APIKeyScopeWorkspaceDormantStop",
"APIKeyScopeWorkspaceDormantUpdate",
"APIKeyScopeWorkspaceDormantUpdateAgent",
"APIKeyScopeWorkspaceProxyAll",
"APIKeyScopeWorkspaceProxyCreate",
"APIKeyScopeWorkspaceProxyDelete",
@@ -13616,10 +13414,7 @@ const docTemplate = `{
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_auto_pause",
"task_manual_pause",
"task_resume"
"jetbrains_connection"
],
"x-enum-varnames": [
"BuildReasonInitiator",
@@ -13630,10 +13425,7 @@ const docTemplate = `{
"BuildReasonCLI",
"BuildReasonSSHConnection",
"BuildReasonVSCodeConnection",
"BuildReasonJetbrainsConnection",
"BuildReasonTaskAutoPause",
"BuildReasonTaskManualPause",
"BuildReasonTaskResume"
"BuildReasonJetbrainsConnection"
]
},
"codersdk.CORSBehavior": {
@@ -14306,18 +14098,14 @@ const docTemplate = `{
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_manual_pause",
"task_resume"
"jetbrains_connection"
],
"x-enum-varnames": [
"CreateWorkspaceBuildReasonDashboard",
"CreateWorkspaceBuildReasonCLI",
"CreateWorkspaceBuildReasonSSHConnection",
"CreateWorkspaceBuildReasonVSCodeConnection",
"CreateWorkspaceBuildReasonJetbrainsConnection",
"CreateWorkspaceBuildReasonTaskManualPause",
"CreateWorkspaceBuildReasonTaskResume"
"CreateWorkspaceBuildReasonJetbrainsConnection"
]
},
"codersdk.CreateWorkspaceBuildRequest": {
@@ -14351,8 +14139,7 @@ const docTemplate = `{
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_manual_pause"
"jetbrains_connection"
],
"allOf": [
{
@@ -15110,16 +14897,6 @@ const docTemplate = `{
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
},
"x-enum-descriptions": [
"This isn't used for anything.",
"This should not be taken out of experiments until we have redesigned the feature.",
"Sends notifications via SMTP and webhooks following certain events.",
"Enables the new workspace usage tracking.",
"Enables web push notifications through the browser.",
"Enables OAuth2 provider functionality.",
"Enables the MCP HTTP server functionality.",
"Enables updating workspace ACLs for sharing with users and groups."
],
"x-enum-varnames": [
"ExperimentExample",
"ExperimentAutoFillParameters",
@@ -15368,6 +15145,10 @@ const docTemplate = `{
"limit": {
"type": "integer"
},
"soft_limit": {
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
"type": "integer"
},
"usage_period": {
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
"allOf": [
@@ -15571,9 +15352,6 @@ const docTemplate = `{
"codersdk.HTTPCookieConfig": {
"type": "object",
"properties": {
"host_prefix": {
"type": "boolean"
},
"same_site": {
"type": "string"
},
@@ -16784,14 +16562,6 @@ const docTemplate = `{
"organization_mapping": {
"type": "object"
},
"redirect_url": {
"description": "RedirectURL is optional, defaulting to 'ACCESS_URL'. Only useful in niche\nsituations where the OIDC callback domain is different from the ACCESS_URL\ndomain.",
"allOf": [
{
"$ref": "#/definitions/serpent.URL"
}
]
},
"scopes": {
"type": "array",
"items": {
@@ -17230,14 +17000,6 @@ const docTemplate = `{
}
}
},
"codersdk.PauseTaskResponse": {
"type": "object",
"properties": {
"workspace_build": {
"$ref": "#/definitions/codersdk.WorkspaceBuild"
}
}
},
"codersdk.Permission": {
"type": "object",
"properties": {
@@ -18030,7 +17792,6 @@ const docTemplate = `{
"share",
"unassign",
"update",
"update_agent",
"update_personal",
"use",
"view_insights",
@@ -18050,7 +17811,6 @@ const docTemplate = `{
"ActionShare",
"ActionUnassign",
"ActionUpdate",
"ActionUpdateAgent",
"ActionUpdatePersonal",
"ActionUse",
"ActionViewInsights",
@@ -18406,14 +18166,6 @@ const docTemplate = `{
}
}
},
"codersdk.ResumeTaskResponse": {
"type": "object",
"properties": {
"workspace_build": {
"$ref": "#/definitions/codersdk.WorkspaceBuild"
}
}
},
"codersdk.RetentionConfig": {
"type": "object",
"properties": {
@@ -19046,9 +18798,6 @@ const docTemplate = `{
"default_ttl_ms": {
"type": "integer"
},
"deleted": {
"type": "boolean"
},
"deprecated": {
"type": "boolean"
},
@@ -19058,10 +18807,6 @@ const docTemplate = `{
"description": {
"type": "string"
},
"disable_module_cache": {
"description": "DisableModuleCache disables the use of cached Terraform modules during\nprovisioning.",
"type": "boolean"
},
"display_name": {
"type": "string"
},
@@ -20018,10 +19763,6 @@ const docTemplate = `{
"description": "DisableEveryoneGroupAccess allows optionally disabling the default\nbehavior of granting the 'everyone' group access to use the template.\nIf this is set to true, the template will not be available to all users,\nand must be explicitly granted to users or groups in the permissions settings\nof the template.",
"type": "boolean"
},
"disable_module_cache": {
"description": "DisableModuleCache disables the using of cached Terraform modules during\nprovisioning. It is recommended not to disable this.",
"type": "boolean"
},
"display_name": {
"type": "string"
},
@@ -21072,14 +20813,6 @@ const docTemplate = `{
}
]
},
"subagent_id": {
"format": "uuid",
"allOf": [
{
"$ref": "#/definitions/uuid.NullUUID"
}
]
},
"workspace_folder": {
"type": "string"
}
@@ -21748,12 +21481,10 @@ const docTemplate = `{
"type": "object",
"properties": {
"p50": {
"type": "number",
"format": "float64"
"type": "number"
},
"p95": {
"type": "number",
"format": "float64"
"type": "number"
}
}
},
@@ -22139,12 +21870,10 @@ const docTemplate = `{
]
},
"recv": {
"type": "integer",
"format": "int64"
"type": "integer"
},
"sent": {
"type": "integer",
"format": "int64"
"type": "integer"
}
}
},
@@ -22771,24 +22500,21 @@ const docTemplate = `{
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"regionV4Latency": {
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"regionV6Latency": {
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"udp": {
@@ -22875,7 +22601,7 @@ const docTemplate = `{
]
},
"default": {
"description": "Default is parsed into Value if set.\nMust be ` + "`" + `\"\"` + "`" + ` if ` + "`" + `DefaultFn` + "`" + ` != nil",
"description": "Default is parsed into Value if set.",
"type": "string"
},
"description": {
@@ -23031,8 +22757,7 @@ const docTemplate = `{
"description": "RegionScore scales latencies of DERP regions by a given scaling\nfactor when determining which region to use as the home\n(\"preferred\") DERP. Scores in the range (0, 1) will cause this\nregion to be proportionally more preferred, and scores in the range\n(1, ∞) will penalize a region.\n\nIf a region is not present in this map, it is treated as having a\nscore of 1.0.\n\nScores should not be 0 or negative; such scores will be ignored.\n\nA nil map means no change from the previous value (if any); an empty\nnon-nil map can be sent to reset all scores back to 1.0.",
"type": "object",
"additionalProperties": {
"type": "number",
"format": "float64"
"type": "number"
}
}
}
+19 -280
View File
@@ -3296,65 +3296,6 @@
}
}
},
"/organizations/{organization}/members/{user}/workspaces/available-users": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": ["application/json"],
"tags": ["Workspaces"],
"summary": "Get users available for workspace creation",
"operationId": "get-users-available-for-workspace-creation",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID",
"name": "organization",
"in": "path",
"required": true
},
{
"type": "string",
"description": "User ID, name, or me",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Search query",
"name": "q",
"in": "query"
},
{
"type": "integer",
"description": "Limit results",
"name": "limit",
"in": "query"
},
{
"type": "integer",
"description": "Offset for pagination",
"name": "offset",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/codersdk.MinimalUser"
}
}
}
}
}
},
"/organizations/{organization}/paginated-members": {
"get": {
"security": [
@@ -5206,82 +5147,6 @@
}
}
},
"/tasks/{user}/{task}/pause": {
"post": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"tags": ["Tasks"],
"summary": "Pause task",
"operationId": "pause-task",
"parameters": [
{
"type": "string",
"description": "Username, user ID, or 'me' for the authenticated user",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "uuid",
"description": "Task ID",
"name": "task",
"in": "path",
"required": true
}
],
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/codersdk.PauseTaskResponse"
}
}
}
}
},
"/tasks/{user}/{task}/resume": {
"post": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"tags": ["Tasks"],
"summary": "Resume task",
"operationId": "resume-task",
"parameters": [
{
"type": "string",
"description": "Username, user ID, or 'me' for the authenticated user",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "uuid",
"description": "Task ID",
"name": "task",
"in": "path",
"required": true
}
],
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/codersdk.ResumeTaskResponse"
}
}
}
}
},
"/tasks/{user}/{task}/send": {
"post": {
"security": [
@@ -7476,52 +7341,6 @@
}
}
},
"/users/{user}/keys/{keyid}/expire": {
"put": {
"security": [
{
"CoderSessionToken": []
}
],
"tags": ["Users"],
"summary": "Expire API key",
"operationId": "expire-api-key",
"parameters": [
{
"type": "string",
"description": "User ID, name, or me",
"name": "user",
"in": "path",
"required": true
},
{
"type": "string",
"format": "string",
"description": "Key ID",
"name": "keyid",
"in": "path",
"required": true
}
],
"responses": {
"204": {
"description": "No Content"
},
"404": {
"description": "Not Found",
"schema": {
"$ref": "#/definitions/codersdk.Response"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/codersdk.Response"
}
}
}
}
},
"/users/{user}/login-type": {
"get": {
"security": [
@@ -9846,7 +9665,7 @@
"parameters": [
{
"type": "string",
"description": "Search query in the format `key:value`. Available keys are: owner, template, name, status, has-agent, dormant, last_used_after, last_used_before, has-ai-task, has_external_agent, healthy.",
"description": "Search query in the format `key:value`. Available keys are: owner, template, name, status, has-agent, dormant, last_used_after, last_used_before, has-ai-task, has_external_agent.",
"name": "q",
"in": "query"
},
@@ -11030,9 +10849,6 @@
"api_key_id": {
"type": "string"
},
"client": {
"type": "string"
},
"ended_at": {
"type": "string",
"format": "date-time"
@@ -11504,7 +11320,6 @@
"workspace:start",
"workspace:stop",
"workspace:update",
"workspace:update_agent",
"workspace_agent_devcontainers:*",
"workspace_agent_devcontainers:create",
"workspace_agent_resource_monitor:*",
@@ -11523,7 +11338,6 @@
"workspace_dormant:start",
"workspace_dormant:stop",
"workspace_dormant:update",
"workspace_dormant:update_agent",
"workspace_proxy:*",
"workspace_proxy:create",
"workspace_proxy:delete",
@@ -11708,7 +11522,6 @@
"APIKeyScopeWorkspaceStart",
"APIKeyScopeWorkspaceStop",
"APIKeyScopeWorkspaceUpdate",
"APIKeyScopeWorkspaceUpdateAgent",
"APIKeyScopeWorkspaceAgentDevcontainersAll",
"APIKeyScopeWorkspaceAgentDevcontainersCreate",
"APIKeyScopeWorkspaceAgentResourceMonitorAll",
@@ -11727,7 +11540,6 @@
"APIKeyScopeWorkspaceDormantStart",
"APIKeyScopeWorkspaceDormantStop",
"APIKeyScopeWorkspaceDormantUpdate",
"APIKeyScopeWorkspaceDormantUpdateAgent",
"APIKeyScopeWorkspaceProxyAll",
"APIKeyScopeWorkspaceProxyCreate",
"APIKeyScopeWorkspaceProxyDelete",
@@ -12207,10 +12019,7 @@
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_auto_pause",
"task_manual_pause",
"task_resume"
"jetbrains_connection"
],
"x-enum-varnames": [
"BuildReasonInitiator",
@@ -12221,10 +12030,7 @@
"BuildReasonCLI",
"BuildReasonSSHConnection",
"BuildReasonVSCodeConnection",
"BuildReasonJetbrainsConnection",
"BuildReasonTaskAutoPause",
"BuildReasonTaskManualPause",
"BuildReasonTaskResume"
"BuildReasonJetbrainsConnection"
]
},
"codersdk.CORSBehavior": {
@@ -12852,18 +12658,14 @@
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_manual_pause",
"task_resume"
"jetbrains_connection"
],
"x-enum-varnames": [
"CreateWorkspaceBuildReasonDashboard",
"CreateWorkspaceBuildReasonCLI",
"CreateWorkspaceBuildReasonSSHConnection",
"CreateWorkspaceBuildReasonVSCodeConnection",
"CreateWorkspaceBuildReasonJetbrainsConnection",
"CreateWorkspaceBuildReasonTaskManualPause",
"CreateWorkspaceBuildReasonTaskResume"
"CreateWorkspaceBuildReasonJetbrainsConnection"
]
},
"codersdk.CreateWorkspaceBuildRequest": {
@@ -12893,8 +12695,7 @@
"cli",
"ssh_connection",
"vscode_connection",
"jetbrains_connection",
"task_manual_pause"
"jetbrains_connection"
],
"allOf": [
{
@@ -13637,16 +13438,6 @@
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
},
"x-enum-descriptions": [
"This isn't used for anything.",
"This should not be taken out of experiments until we have redesigned the feature.",
"Sends notifications via SMTP and webhooks following certain events.",
"Enables the new workspace usage tracking.",
"Enables web push notifications through the browser.",
"Enables OAuth2 provider functionality.",
"Enables the MCP HTTP server functionality.",
"Enables updating workspace ACLs for sharing with users and groups."
],
"x-enum-varnames": [
"ExperimentExample",
"ExperimentAutoFillParameters",
@@ -13895,6 +13686,10 @@
"limit": {
"type": "integer"
},
"soft_limit": {
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
"type": "integer"
},
"usage_period": {
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
"allOf": [
@@ -14092,9 +13887,6 @@
"codersdk.HTTPCookieConfig": {
"type": "object",
"properties": {
"host_prefix": {
"type": "boolean"
},
"same_site": {
"type": "string"
},
@@ -15248,14 +15040,6 @@
"organization_mapping": {
"type": "object"
},
"redirect_url": {
"description": "RedirectURL is optional, defaulting to 'ACCESS_URL'. Only useful in niche\nsituations where the OIDC callback domain is different from the ACCESS_URL\ndomain.",
"allOf": [
{
"$ref": "#/definitions/serpent.URL"
}
]
},
"scopes": {
"type": "array",
"items": {
@@ -15679,14 +15463,6 @@
}
}
},
"codersdk.PauseTaskResponse": {
"type": "object",
"properties": {
"workspace_build": {
"$ref": "#/definitions/codersdk.WorkspaceBuild"
}
}
},
"codersdk.Permission": {
"type": "object",
"properties": {
@@ -16442,7 +16218,6 @@
"share",
"unassign",
"update",
"update_agent",
"update_personal",
"use",
"view_insights",
@@ -16462,7 +16237,6 @@
"ActionShare",
"ActionUnassign",
"ActionUpdate",
"ActionUpdateAgent",
"ActionUpdatePersonal",
"ActionUse",
"ActionViewInsights",
@@ -16808,14 +16582,6 @@
}
}
},
"codersdk.ResumeTaskResponse": {
"type": "object",
"properties": {
"workspace_build": {
"$ref": "#/definitions/codersdk.WorkspaceBuild"
}
}
},
"codersdk.RetentionConfig": {
"type": "object",
"properties": {
@@ -17427,9 +17193,6 @@
"default_ttl_ms": {
"type": "integer"
},
"deleted": {
"type": "boolean"
},
"deprecated": {
"type": "boolean"
},
@@ -17439,10 +17202,6 @@
"description": {
"type": "string"
},
"disable_module_cache": {
"description": "DisableModuleCache disables the use of cached Terraform modules during\nprovisioning.",
"type": "boolean"
},
"display_name": {
"type": "string"
},
@@ -18353,10 +18112,6 @@
"description": "DisableEveryoneGroupAccess allows optionally disabling the default\nbehavior of granting the 'everyone' group access to use the template.\nIf this is set to true, the template will not be available to all users,\nand must be explicitly granted to users or groups in the permissions settings\nof the template.",
"type": "boolean"
},
"disable_module_cache": {
"description": "DisableModuleCache disables the using of cached Terraform modules during\nprovisioning. It is recommended not to disable this.",
"type": "boolean"
},
"display_name": {
"type": "string"
},
@@ -19362,14 +19117,6 @@
}
]
},
"subagent_id": {
"format": "uuid",
"allOf": [
{
"$ref": "#/definitions/uuid.NullUUID"
}
]
},
"workspace_folder": {
"type": "string"
}
@@ -19983,12 +19730,10 @@
"type": "object",
"properties": {
"p50": {
"type": "number",
"format": "float64"
"type": "number"
},
"p95": {
"type": "number",
"format": "float64"
"type": "number"
}
}
},
@@ -20353,12 +20098,10 @@
]
},
"recv": {
"type": "integer",
"format": "int64"
"type": "integer"
},
"sent": {
"type": "integer",
"format": "int64"
"type": "integer"
}
}
},
@@ -20941,24 +20684,21 @@
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"regionV4Latency": {
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"regionV6Latency": {
"description": "keyed by DERP Region ID",
"type": "object",
"additionalProperties": {
"type": "integer",
"format": "int64"
"type": "integer"
}
},
"udp": {
@@ -21045,7 +20785,7 @@
]
},
"default": {
"description": "Default is parsed into Value if set.\nMust be `\"\"` if `DefaultFn` != nil",
"description": "Default is parsed into Value if set.",
"type": "string"
},
"description": {
@@ -21195,8 +20935,7 @@
"description": "RegionScore scales latencies of DERP regions by a given scaling\nfactor when determining which region to use as the home\n(\"preferred\") DERP. Scores in the range (0, 1) will cause this\nregion to be proportionally more preferred, and scores in the range\n(1, ∞) will penalize a region.\n\nIf a region is not present in this map, it is treated as having a\nscore of 1.0.\n\nScores should not be 0 or negative; such scores will be ignored.\n\nA nil map means no change from the previous value (if any); an empty\nnon-nil map can be sent to reset all scores back to 1.0.",
"type": "object",
"additionalProperties": {
"type": "number",
"format": "float64"
"type": "number"
}
}
}
-63
View File
@@ -421,69 +421,6 @@ func (api *API) deleteAPIKey(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(http.StatusNoContent)
}
// @Summary Expire API key
// @ID expire-api-key
// @Security CoderSessionToken
// @Tags Users
// @Param user path string true "User ID, name, or me"
// @Param keyid path string true "Key ID" format(string)
// @Success 204
// @Failure 404 {object} codersdk.Response
// @Failure 500 {object} codersdk.Response
// @Router /users/{user}/keys/{keyid}/expire [put]
func (api *API) expireAPIKey(rw http.ResponseWriter, r *http.Request) {
var (
ctx = r.Context()
keyID = chi.URLParam(r, "keyid")
auditor = api.Auditor.Load()
aReq, commitAudit = audit.InitRequest[database.APIKey](rw, &audit.RequestParams{
Audit: *auditor,
Log: api.Logger,
Request: r,
Action: database.AuditActionWrite,
})
)
defer commitAudit()
if err := api.Database.InTx(func(db database.Store) error {
key, err := db.GetAPIKeyByID(ctx, keyID)
if err != nil {
return xerrors.Errorf("fetch API key: %w", err)
}
if !key.ExpiresAt.After(api.Clock.Now()) {
return nil // Already expired
}
aReq.Old = key
if err := db.UpdateAPIKeyByID(ctx, database.UpdateAPIKeyByIDParams{
ID: key.ID,
LastUsed: key.LastUsed,
ExpiresAt: dbtime.Now(),
IPAddress: key.IPAddress,
}); err != nil {
return xerrors.Errorf("expire API key: %w", err)
}
// Fetch the updated key for audit log.
newKey, err := db.GetAPIKeyByID(ctx, keyID)
if err != nil {
api.Logger.Warn(ctx, "failed to fetch updated API key for audit log", slog.Error(err))
} else {
aReq.New = newKey
}
return nil
}, nil); httpapi.Is404Error(err) {
httpapi.ResourceNotFound(rw)
return
} else if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error expiring API key.",
Detail: err.Error(),
})
return
}
rw.WriteHeader(http.StatusNoContent)
}
// @Summary Get token config
// @ID get-token-config
// @Security CoderSessionToken
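One detail in the removed handler worth noting: the !key.ExpiresAt.After(now) guard treats a key expiring exactly now as already expired, which is what makes repeated expiry calls idempotent. A minimal standard-library sketch:

	now := time.Now()
	expiresAt := now // boundary case: expires exactly now
	// !After means "at or before now": the handler returns early
	// instead of rewriting ExpiresAt on every call.
	fmt.Println(!expiresAt.After(now)) // true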
+4 -159
View File
@@ -400,7 +400,7 @@ func TestAPIKey_Deleted(t *testing.T) {
require.Error(t, err)
var apiErr *codersdk.Error
require.ErrorAs(t, err, &apiErr)
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
}
func TestAPIKey_SetDefault(t *testing.T) {
@@ -439,7 +439,7 @@ func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
DeploymentValues: dc,
})
setupCtx := testutil.Context(t, testutil.WaitLong)
ctx := testutil.Context(t, testutil.WaitLong)
// Given: an existing api token for the prebuilds user
_, prebuildsToken := dbgen.APIKey(t, db, database.APIKey{
@@ -448,167 +448,12 @@ func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
client.SetSessionToken(prebuildsToken)
// When: the prebuilds user tries to create an API key
_, err := client.CreateAPIKey(setupCtx, database.PrebuildsSystemUserID.String())
_, err := client.CreateAPIKey(ctx, database.PrebuildsSystemUserID.String())
// Then: denied.
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
// When: the prebuilds user tries to create a token
_, err = client.CreateToken(setupCtx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
_, err = client.CreateToken(ctx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
// Then: also denied.
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
}
//nolint:tparallel,paralleltest // Subtests share the same coderdtest instance and auditor.
func TestExpireAPIKey(t *testing.T) {
t.Parallel()
auditor := audit.NewMock()
adminClient := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
admin := coderdtest.CreateFirstUser(t, adminClient)
memberClient, member := coderdtest.CreateAnotherUser(t, adminClient, admin.OrganizationID)
t.Run("OwnerCanExpireOwnToken", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Create a token.
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
Lifetime: time.Hour * 24 * 7,
})
require.NoError(t, err)
keyID := strings.Split(res.Key, "-")[0]
// Verify the token is not expired.
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.True(t, key.ExpiresAt.After(time.Now()))
auditor.ResetLogs()
// Expire the token.
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
require.NoError(t, err)
// Verify the token is expired.
key, err = adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.True(t, key.ExpiresAt.Before(time.Now()))
// Verify audit log.
als := auditor.AuditLogs()
require.Len(t, als, 1)
require.Equal(t, database.AuditActionWrite, als[0].Action)
require.Equal(t, database.ResourceTypeApiKey, als[0].ResourceType)
require.Equal(t, admin.UserID.String(), als[0].UserID.String())
})
t.Run("AdminCanExpireOtherUsersToken", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Create a token for the member.
res, err := memberClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
Lifetime: time.Hour * 24 * 7,
})
require.NoError(t, err)
keyID := strings.Split(res.Key, "-")[0]
// Admin expires the member's token.
err = adminClient.ExpireAPIKey(ctx, member.ID.String(), keyID)
require.NoError(t, err)
// Verify the token is expired.
key, err := memberClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.True(t, key.ExpiresAt.Before(time.Now()))
})
t.Run("MemberCannotExpireOtherUsersToken", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Create a token for the admin.
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
Lifetime: time.Hour * 24 * 7,
})
require.NoError(t, err)
keyID := strings.Split(res.Key, "-")[0]
// Member attempts to expire admin's token.
err = memberClient.ExpireAPIKey(ctx, admin.UserID.String(), keyID)
require.Error(t, err)
var sdkErr *codersdk.Error
require.ErrorAs(t, err, &sdkErr)
// Members cannot read other users, so they get a 404 Not Found
// from the authorization layer.
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
})
t.Run("NotFound", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Try to expire a non-existent token.
err := adminClient.ExpireAPIKey(ctx, codersdk.Me, "nonexistent")
require.Error(t, err)
var sdkErr *codersdk.Error
require.ErrorAs(t, err, &sdkErr)
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
})
t.Run("ExpiringAlreadyExpiredTokenSucceeds", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Create and expire a token.
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
Lifetime: time.Hour * 24 * 7,
})
require.NoError(t, err)
keyID := strings.Split(res.Key, "-")[0]
// Expire it once.
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
require.NoError(t, err)
// Invariant: make sure it's actually expired
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.LessOrEqual(t, key.ExpiresAt, time.Now(), "key should be expired")
// Expire it again - should succeed (idempotent).
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
require.NoError(t, err)
// Token should still be just as expired as before. No more, no less.
keyAgain, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.Equal(t, key.ExpiresAt, keyAgain.ExpiresAt, "expiration should be idempotent")
})
t.Run("DeletingExpiredTokenSucceeds", func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitLong)
// Create a token.
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
Lifetime: time.Hour * 24 * 7,
})
require.NoError(t, err)
keyID := strings.Split(res.Key, "-")[0]
// Expire it first.
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
require.NoError(t, err)
// Verify it's expired.
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.NoError(t, err)
require.True(t, key.ExpiresAt.Before(time.Now()))
// Delete the expired token - should succeed.
err = adminClient.DeleteAPIKey(ctx, codersdk.Me, keyID)
require.NoError(t, err)
// Verify it's gone.
_, err = adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
require.Error(t, err)
var sdkErr *codersdk.Error
require.ErrorAs(t, err, &sdkErr)
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
})
}
+18 -56
View File
@@ -48,10 +48,9 @@ type Executor struct {
tick <-chan time.Time
statsCh chan<- Stats
// NotificationsEnqueuer handles enqueueing notifications for delivery by SMTP, webhook, etc.
notificationsEnqueuer notifications.Enqueuer
reg prometheus.Registerer
experiments codersdk.Experiments
workspaceBuilderMetrics *wsbuilder.Metrics
notificationsEnqueuer notifications.Enqueuer
reg prometheus.Registerer
experiments codersdk.Experiments
metrics executorMetrics
}
@@ -68,24 +67,23 @@ type Stats struct {
}
// NewExecutor returns a new autobuild executor.
func NewExecutor(ctx context.Context, db database.Store, ps pubsub.Pubsub, fc *files.Cache, reg prometheus.Registerer, tss *atomic.Pointer[schedule.TemplateScheduleStore], auditor *atomic.Pointer[audit.Auditor], acs *atomic.Pointer[dbauthz.AccessControlStore], buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker], log slog.Logger, tick <-chan time.Time, enqueuer notifications.Enqueuer, exp codersdk.Experiments, workspaceBuilderMetrics *wsbuilder.Metrics) *Executor {
func NewExecutor(ctx context.Context, db database.Store, ps pubsub.Pubsub, fc *files.Cache, reg prometheus.Registerer, tss *atomic.Pointer[schedule.TemplateScheduleStore], auditor *atomic.Pointer[audit.Auditor], acs *atomic.Pointer[dbauthz.AccessControlStore], buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker], log slog.Logger, tick <-chan time.Time, enqueuer notifications.Enqueuer, exp codersdk.Experiments) *Executor {
factory := promauto.With(reg)
le := &Executor{
//nolint:gocritic // Autostart has a limited set of permissions.
ctx: dbauthz.AsAutostart(ctx),
db: db,
ps: ps,
fileCache: fc,
templateScheduleStore: tss,
tick: tick,
log: log.Named("autobuild"),
auditor: auditor,
accessControlStore: acs,
buildUsageChecker: buildUsageChecker,
notificationsEnqueuer: enqueuer,
reg: reg,
experiments: exp,
workspaceBuilderMetrics: workspaceBuilderMetrics,
ctx: dbauthz.AsAutostart(ctx),
db: db,
ps: ps,
fileCache: fc,
templateScheduleStore: tss,
tick: tick,
log: log.Named("autobuild"),
auditor: auditor,
accessControlStore: acs,
buildUsageChecker: buildUsageChecker,
notificationsEnqueuer: enqueuer,
reg: reg,
experiments: exp,
metrics: executorMetrics{
autobuildExecutionDuration: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: "coderd",
@@ -231,7 +229,6 @@ func (e *Executor) runOnce(t time.Time) Stats {
job *database.ProvisionerJob
auditLog *auditParams
shouldNotifyDormancy bool
shouldNotifyTaskPause bool
nextBuild *database.WorkspaceBuild
activeTemplateVersion database.TemplateVersion
ws database.Workspace
@@ -317,10 +314,6 @@ func (e *Executor) runOnce(t time.Time) Stats {
return nil
}
if reason == database.BuildReasonTaskAutoPause {
shouldNotifyTaskPause = true
}
// Get the template version job to access tags
templateVersionJob, err := tx.GetProvisionerJobByID(e.ctx, activeTemplateVersion.JobID)
if err != nil {
@@ -342,8 +335,7 @@ func (e *Executor) runOnce(t time.Time) Stats {
SetLastWorkspaceBuildInTx(&latestBuild).
SetLastWorkspaceBuildJobInTx(&latestJob).
Experiments(e.experiments).
Reason(reason).
BuildMetrics(e.workspaceBuilderMetrics)
Reason(reason)
log.Debug(e.ctx, "auto building workspace", slog.F("transition", nextTransition))
if nextTransition == database.WorkspaceTransitionStart &&
useActiveVersion(accessControl, ws) {
@@ -487,28 +479,6 @@ func (e *Executor) runOnce(t time.Time) Stats {
log.Warn(e.ctx, "failed to notify of workspace marked as dormant", slog.Error(err), slog.F("workspace_id", ws.ID))
}
}
if shouldNotifyTaskPause {
task, err := e.db.GetTaskByID(e.ctx, ws.TaskID.UUID)
if err != nil {
log.Warn(e.ctx, "failed to get task for pause notification", slog.Error(err), slog.F("task_id", ws.TaskID.UUID), slog.F("workspace_id", ws.ID))
} else {
if _, err := e.notificationsEnqueuer.Enqueue(
e.ctx,
ws.OwnerID,
notifications.TemplateTaskPaused,
map[string]string{
"task": task.Name,
"task_id": task.ID.String(),
"workspace": ws.Name,
"pause_reason": "inactivity exceeded the dormancy threshold",
},
"lifecycle_executor",
ws.ID, ws.OwnerID, ws.OrganizationID,
); err != nil {
log.Warn(e.ctx, "failed to notify of task paused", slog.Error(err), slog.F("task_id", ws.TaskID.UUID), slog.F("workspace_id", ws.ID))
}
}
}
return nil
}()
if err != nil && !xerrors.Is(err, context.Canceled) {
@@ -552,18 +522,10 @@ func getNextTransition(
) {
switch {
case isEligibleForAutostop(user, ws, latestBuild, latestJob, currentTick):
// Use task-specific reason for AI task workspaces.
if ws.TaskID.Valid {
return database.WorkspaceTransitionStop, database.BuildReasonTaskAutoPause, nil
}
return database.WorkspaceTransitionStop, database.BuildReasonAutostop, nil
case isEligibleForAutostart(user, ws, latestBuild, latestJob, templateSchedule, currentTick):
return database.WorkspaceTransitionStart, database.BuildReasonAutostart, nil
case isEligibleForFailedStop(latestBuild, latestJob, templateSchedule, currentTick):
// Use task-specific reason for AI task workspaces.
if ws.TaskID.Valid {
return database.WorkspaceTransitionStop, database.BuildReasonTaskAutoPause, nil
}
return database.WorkspaceTransitionStop, database.BuildReasonAutostop, nil
case isEligibleForDormantStop(ws, templateSchedule, currentTick):
// Only stop started workspaces.
@@ -5,113 +5,12 @@ import (
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/schedule"
)
func Test_getNextTransition_TaskAutoPause(t *testing.T) {
t.Parallel()
// Set up a workspace that is eligible for autostop (past deadline).
now := time.Now()
pastDeadline := now.Add(-time.Hour)
okUser := database.User{Status: database.UserStatusActive}
okBuild := database.WorkspaceBuild{
Transition: database.WorkspaceTransitionStart,
Deadline: pastDeadline,
}
okJob := database.ProvisionerJob{
JobStatus: database.ProvisionerJobStatusSucceeded,
}
okTemplateSchedule := schedule.TemplateScheduleOptions{}
// Failed build setup for failedstop tests.
failedBuild := database.WorkspaceBuild{
Transition: database.WorkspaceTransitionStart,
}
failedJob := database.ProvisionerJob{
JobStatus: database.ProvisionerJobStatusFailed,
CompletedAt: sql.NullTime{Time: now.Add(-time.Hour), Valid: true},
}
failedTemplateSchedule := schedule.TemplateScheduleOptions{
FailureTTL: time.Minute, // TTL already elapsed since job completed an hour ago.
}
testCases := []struct {
Name string
Workspace database.Workspace
Build database.WorkspaceBuild
Job database.ProvisionerJob
TemplateSchedule schedule.TemplateScheduleOptions
ExpectedReason database.BuildReason
}{
{
Name: "RegularWorkspace_Autostop",
Workspace: database.Workspace{
DormantAt: sql.NullTime{Valid: false},
},
Build: okBuild,
Job: okJob,
TemplateSchedule: okTemplateSchedule,
ExpectedReason: database.BuildReasonAutostop,
},
{
Name: "TaskWorkspace_Autostop_UsesTaskAutoPause",
Workspace: database.Workspace{
DormantAt: sql.NullTime{Valid: false},
TaskID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
},
Build: okBuild,
Job: okJob,
TemplateSchedule: okTemplateSchedule,
ExpectedReason: database.BuildReasonTaskAutoPause,
},
{
Name: "RegularWorkspace_FailedStop",
Workspace: database.Workspace{
DormantAt: sql.NullTime{Valid: false},
},
Build: failedBuild,
Job: failedJob,
TemplateSchedule: failedTemplateSchedule,
ExpectedReason: database.BuildReasonAutostop,
},
{
Name: "TaskWorkspace_FailedStop_UsesTaskAutoPause",
Workspace: database.Workspace{
DormantAt: sql.NullTime{Valid: false},
TaskID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
},
Build: failedBuild,
Job: failedJob,
TemplateSchedule: failedTemplateSchedule,
ExpectedReason: database.BuildReasonTaskAutoPause,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
transition, reason, err := getNextTransition(
okUser,
tc.Workspace,
tc.Build,
tc.Job,
tc.TemplateSchedule,
now,
)
require.NoError(t, err)
require.Equal(t, database.WorkspaceTransitionStop, transition)
require.Equal(t, tc.ExpectedReason, reason)
})
}
}
func Test_isEligibleForAutostart(t *testing.T) {
t.Parallel()
@@ -2019,69 +2019,5 @@ func TestExecutorTaskWorkspace(t *testing.T) {
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
// Then: The build reason should be TaskAutoPause (not regular Autostop)
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
assert.Equal(t, codersdk.BuildReasonTaskAutoPause, workspace.LatestBuild.Reason, "task workspace should use TaskAutoPause build reason")
})
t.Run("AutostopNotification", func(t *testing.T) {
t.Parallel()
var (
tickCh = make(chan time.Time)
statsCh = make(chan autobuild.Stats)
notifyEnq = notificationstest.FakeEnqueuer{}
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
AutobuildTicker: tickCh,
IncludeProvisionerDaemon: true,
AutobuildStats: statsCh,
NotificationsEnqueuer: &notifyEnq,
})
admin = coderdtest.CreateFirstUser(t, client)
)
// Given: A task workspace with an 8 hour deadline
ctx := testutil.Context(t, testutil.WaitShort)
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 8*time.Hour)
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostop notification")
// Given: The workspace is currently running
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
require.NotZero(t, workspace.LatestBuild.Deadline, "workspace should have a deadline for autostop")
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
require.NoError(t, err)
// When: the autobuild executor ticks after the deadline
go func() {
tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
tickCh <- tickTime
close(tickCh)
}()
// Then: We expect to see a stop transition
stats := <-statsCh
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
// Then: A task paused notification was sent with "idle timeout" reason
require.True(t, workspace.TaskID.Valid, "workspace should have a task ID")
task, err := db.GetTaskByID(dbauthz.AsSystemRestricted(ctx), workspace.TaskID.UUID)
require.NoError(t, err)
sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTaskPaused))
require.Len(t, sent, 1)
require.Equal(t, workspace.OwnerID, sent[0].UserID)
require.Equal(t, task.Name, sent[0].Labels["task"])
require.Equal(t, task.ID.String(), sent[0].Labels["task_id"])
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
require.Equal(t, "inactivity exceeded the dormancy threshold", sent[0].Labels["pause_reason"])
})
}
+9 -20
View File
@@ -95,26 +95,15 @@ func (t *Tracker) FlushToDB(ctx context.Context, db database.Store, replicaID uu
t.mu.Unlock()
//nolint:gocritic // This is the actual package doing boundary usage tracking.
authCtx := dbauthz.AsBoundaryUsageTracker(ctx)
err := db.InTx(func(tx database.Store) error {
// The advisory lock ensures a clean period cutover by preventing
// this upsert from racing with the aggregate+delete in
// GetAndResetBoundaryUsageSummary. Without it, upserted data
// could be lost or miscounted across periods.
if err := tx.AcquireLock(authCtx, database.LockIDBoundaryUsageStats); err != nil {
return err
}
_, err := tx.UpsertBoundaryUsageStats(authCtx, database.UpsertBoundaryUsageStatsParams{
ReplicaID: replicaID,
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
UniqueUsersCount: userCount, // cumulative, for UPDATE
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
UniqueUsersDelta: userDelta, // delta, for INSERT
AllowedRequests: allowed,
DeniedRequests: denied,
})
return err
}, nil)
_, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
ReplicaID: replicaID,
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
UniqueUsersCount: userCount, // cumulative, for UPDATE
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
UniqueUsersDelta: userDelta, // delta, for INSERT
AllowedRequests: allowed,
DeniedRequests: denied,
})
// Always reset cumulative counts to prevent unbounded memory growth (e.g.
// if the DB is unreachable). Copy delta maps to preserve any Track() calls
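Editor's note: with the advisory lock gone, correctness rests on the upsert's cumulative/delta split plus the tracker resetting its maps on every flush. A minimal runnable sketch of that bookkeeping, using hypothetical names (the real Tracker also counts users and request totals):

package main

import "fmt"

// state is a hypothetical stand-in for the Tracker's mutex-guarded fields.
type state struct {
	seen  map[string]struct{} // cumulative unique workspaces this period
	delta map[string]struct{} // workspaces first seen since the last flush
}

// beginFlush hands back the delta map for the DB write and installs fresh
// maps. Resetting before the write even succeeds bounds memory when the DB
// is unreachable, and the fresh delta map means Track() calls that race
// with the write are captured by the next flush instead of being lost.
func (s *state) beginFlush() map[string]struct{} {
	out := s.delta
	s.delta = make(map[string]struct{})
	s.seen = make(map[string]struct{})
	return out
}

func main() {
	s := &state{
		seen:  map[string]struct{}{"ws-1": {}},
		delta: map[string]struct{}{"ws-1": {}},
	}
	handoff := s.beginFlush()
	fmt.Println(len(handoff), len(s.delta), len(s.seen)) // prints: 1 0 0
}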
+87 -42
View File
@@ -45,7 +45,7 @@ func TestTracker_Track_Single(t *testing.T) {
// Verify the data was written correctly.
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces)
require.Equal(t, int64(1), summary.UniqueUsers)
@@ -73,7 +73,7 @@ func TestTracker_Track_DuplicateWorkspaceUser(t *testing.T) {
require.NoError(t, err)
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should be 1 unique workspace")
require.Equal(t, int64(1), summary.UniqueUsers, "should be 1 unique user")
@@ -102,7 +102,7 @@ func TestTracker_Track_MultipleWorkspacesUsers(t *testing.T) {
require.NoError(t, err)
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(3), summary.UniqueWorkspaces)
require.Equal(t, int64(2), summary.UniqueUsers)
@@ -140,7 +140,7 @@ func TestTracker_Track_Concurrent(t *testing.T) {
require.NoError(t, err)
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(numGoroutines), summary.UniqueWorkspaces)
require.Equal(t, int64(numGoroutines), summary.UniqueUsers)
@@ -175,7 +175,7 @@ func TestTracker_FlushToDB_Accumulates(t *testing.T) {
require.NoError(t, err)
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces)
require.Equal(t, int64(1), summary.UniqueUsers)
@@ -202,7 +202,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
require.NoError(t, err)
// Simulate telemetry reset (new period).
_, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
err = db.ResetBoundaryUsageStats(boundaryCtx)
require.NoError(t, err)
// Track new data.
@@ -215,7 +215,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
require.NoError(t, err)
// The summary should only contain the new data after reset.
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should only count new workspace")
require.Equal(t, int64(1), summary.UniqueUsers, "should only count new user")
@@ -237,7 +237,7 @@ func TestTracker_FlushToDB_NoActivity(t *testing.T) {
// Verify nothing was written to DB.
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(0), summary.UniqueWorkspaces)
require.Equal(t, int64(0), summary.AllowedRequests)
@@ -265,7 +265,7 @@ func TestUpsertBoundaryUsageStats_Insert(t *testing.T) {
require.True(t, newPeriod, "should return true for insert")
// Verify INSERT used the delta values, not cumulative.
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
require.Equal(t, int64(5), summary.UniqueWorkspaces)
require.Equal(t, int64(3), summary.UniqueUsers)
@@ -301,7 +301,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
require.False(t, newPeriod, "should return false for update")
// Verify UPDATE used cumulative values.
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
require.Equal(t, int64(8), summary.UniqueWorkspaces)
require.Equal(t, int64(5), summary.UniqueUsers)
@@ -309,7 +309,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
require.Equal(t, int64(10+20), summary.DeniedRequests)
}
func TestGetAndResetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
@@ -347,7 +347,7 @@ func TestGetAndResetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
})
require.NoError(t, err)
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
// Verify aggregation (SUM of all replicas).
@@ -357,13 +357,13 @@ func TestGetAndResetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
require.Equal(t, int64(45), summary.DeniedRequests) // 10 + 15 + 20
}
func TestGetAndResetBoundaryUsageSummary_Empty(t *testing.T) {
func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
// COALESCE should return 0 for all columns.
@@ -373,7 +373,7 @@ func TestGetAndResetBoundaryUsageSummary_Empty(t *testing.T) {
require.Equal(t, int64(0), summary.DeniedRequests)
}
func TestGetAndResetBoundaryUsageSummary_DeletesData(t *testing.T) {
func TestResetBoundaryUsageStats(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
@@ -391,19 +391,61 @@ func TestGetAndResetBoundaryUsageSummary_DeletesData(t *testing.T) {
require.NoError(t, err)
}
// Should return the summary AND delete all data.
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
// Verify data exists.
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
require.Greater(t, summary.AllowedRequests, int64(0))
// Reset.
err = db.ResetBoundaryUsageStats(ctx)
require.NoError(t, err)
require.Equal(t, int64(1+2+3+4+5), summary.UniqueWorkspaces)
require.Equal(t, int64(10+20+30+40+50), summary.AllowedRequests)
// Verify all data is gone.
summary, err = db.GetAndResetBoundaryUsageSummary(ctx, 60000)
summary, err = db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
require.Equal(t, int64(0), summary.UniqueWorkspaces)
require.Equal(t, int64(0), summary.AllowedRequests)
}
func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
replica1 := uuid.New()
replica2 := uuid.New()
// Insert stats for 2 replicas. Delta fields are used for INSERT.
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
ReplicaID: replica1,
UniqueWorkspacesDelta: 10,
UniqueUsersDelta: 5,
AllowedRequests: 100,
DeniedRequests: 10,
})
require.NoError(t, err)
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
ReplicaID: replica2,
UniqueWorkspacesDelta: 20,
UniqueUsersDelta: 10,
AllowedRequests: 200,
DeniedRequests: 20,
})
require.NoError(t, err)
// Delete replica1's stats.
err = db.DeleteBoundaryUsageStatsByReplicaID(ctx, replica1)
require.NoError(t, err)
// Verify only replica2's stats remain.
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
require.NoError(t, err)
require.Equal(t, int64(20), summary.UniqueWorkspaces)
require.Equal(t, int64(200), summary.AllowedRequests)
}
func TestTracker_TelemetryCycle(t *testing.T) {
t.Parallel()
@@ -435,8 +477,8 @@ func TestTracker_TelemetryCycle(t *testing.T) {
require.NoError(t, tracker2.FlushToDB(ctx, db, replica2))
require.NoError(t, tracker3.FlushToDB(ctx, db, replica3))
// Telemetry aggregates and resets (simulating telemetry report sent).
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
// Telemetry aggregates.
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
// Verify aggregation.
@@ -445,12 +487,15 @@ func TestTracker_TelemetryCycle(t *testing.T) {
require.Equal(t, int64(105), summary.AllowedRequests) // 25 + 75 + 5
require.Equal(t, int64(15), summary.DeniedRequests) // 3 + 12 + 0
// Telemetry resets stats (simulating telemetry report sent).
require.NoError(t, db.ResetBoundaryUsageStats(boundaryCtx))
// Next flush from trackers should detect new period.
tracker1.Track(uuid.New(), uuid.New(), 1, 0)
require.NoError(t, tracker1.FlushToDB(ctx, db, replica1))
// Verify trackers reset their in-memory state.
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces)
require.Equal(t, int64(1), summary.AllowedRequests)
@@ -468,24 +513,30 @@ func TestTracker_FlushToDB_NoStaleDataAfterReset(t *testing.T) {
workspaceID := uuid.New()
ownerID := uuid.New()
// Track some data and flush.
// Track some data, flush, and verify.
tracker.Track(workspaceID, ownerID, 10, 5)
err := tracker.FlushToDB(ctx, db, replicaID)
require.NoError(t, err)
// Simulate telemetry reset (new period) - this also verifies the data.
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(1), summary.UniqueWorkspaces)
require.Equal(t, int64(10), summary.AllowedRequests)
// Simulate telemetry reset (new period).
err = db.ResetBoundaryUsageStats(boundaryCtx)
require.NoError(t, err)
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(0), summary.AllowedRequests)
// Flush again without any new Track() calls. This should not write stale
// data back to the DB.
err = tracker.FlushToDB(ctx, db, replicaID)
require.NoError(t, err)
// Summary should be empty (no stale data written).
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(0), summary.UniqueWorkspaces)
require.Equal(t, int64(0), summary.UniqueUsers)
@@ -531,7 +582,7 @@ func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
// Verify stats are non-negative.
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.GreaterOrEqual(t, summary.AllowedRequests, int64(0))
require.GreaterOrEqual(t, summary.DeniedRequests, int64(0))
@@ -546,17 +597,6 @@ type trackDuringUpsertDB struct {
userID uuid.UUID
}
func (s *trackDuringUpsertDB) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
return s.Store.InTx(func(tx database.Store) error {
return fn(&trackDuringUpsertDB{
Store: tx,
tracker: s.tracker,
workspaceID: s.workspaceID,
userID: s.userID,
})
}, opts)
}
func (s *trackDuringUpsertDB) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
s.tracker.Track(s.workspaceID, s.userID, 20, 10)
return s.Store.UpsertBoundaryUsageStats(ctx, arg)
@@ -586,12 +626,17 @@ func TestTracker_TrackDuringFlush(t *testing.T) {
err := tracker.FlushToDB(ctx, trackingDB, replicaID)
require.NoError(t, err)
// Second flush captures the Track() that happened during the first flush.
// Verify first flush only wrote the initial data.
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(10), summary.AllowedRequests)
// The second flush should include the Track() call that happened during the
// first flush's DB operation.
err = tracker.FlushToDB(ctx, db, replicaID)
require.NoError(t, err)
// Verify both flushes are in the summary.
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
require.NoError(t, err)
require.Equal(t, int64(10+20), summary.AllowedRequests)
require.Equal(t, int64(5+10), summary.DeniedRequests)
-20
View File
@@ -1,20 +0,0 @@
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-440
View File
@@ -1,440 +0,0 @@
// Package cachecompress creates a compressed cache of static files based on an http.FS. It is modified from
// https://github.com/go-chi/chi Compressor middleware. See the LICENSE file in this directory for copyright
// information.
package cachecompress
import (
"compress/flate"
"compress/gzip"
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/xerrors"
"cdr.dev/slog/v3"
)
type cacheKey struct {
encoding string
urlPath string
}
func (c cacheKey) filePath(cacheDir string) string {
// URLs can have slashes or other characters we don't want the file system interpreting. So we just encode the path
// to a flat base64 filename.
filename := base64.URLEncoding.EncodeToString([]byte(c.urlPath))
return filepath.Join(cacheDir, c.encoding, filename)
}
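// Example (editor's note, hypothetical values): encoding "gzip" and URL
// path "/assets/app.js" give the base64url filename
// "L2Fzc2V0cy9hcHAuanM=", so filePath("/cache") returns
// "/cache/gzip/L2Fzc2V0cy9hcHAuanM=".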
func getCacheKey(encoding string, r *http.Request) cacheKey {
return cacheKey{
encoding: encoding,
urlPath: r.URL.Path,
}
}
type ref struct {
key cacheKey
done chan struct{}
err chan error
}
// Compressor represents a set of encoding configurations.
type Compressor struct {
logger slog.Logger
// The mapping of encoder names to encoder functions.
encoders map[string]EncoderFunc
// The mapping of pooled encoders to pools.
pooledEncoders map[string]*sync.Pool
// The list of encoders in order of decreasing precedence.
encodingPrecedence []string
level int // The compression level.
cacheDir string
orig http.FileSystem
mu sync.Mutex
cache map[cacheKey]ref
}
// NewCompressor creates a new Compressor that will handle encoding responses.
//
// The level should be one of the ones defined in the flate package.
// The types are the content types that are allowed to be compressed.
func NewCompressor(logger slog.Logger, level int, cacheDir string, orig http.FileSystem) *Compressor {
c := &Compressor{
logger: logger.Named("cachecompress"),
level: level,
encoders: make(map[string]EncoderFunc),
pooledEncoders: make(map[string]*sync.Pool),
cacheDir: cacheDir,
orig: orig,
cache: make(map[cacheKey]ref),
}
// Set the default encoders. The precedence order uses the reverse
// ordering that the encoders were added. This means adding new encoders
// will move them to the front of the order.
//
// TODO:
// lzma: Opera.
// sdch: Chrome, Android. Gzip output + dictionary header.
// br: Brotli, see https://github.com/go-chi/chi/pull/326
// HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
// wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
// checksum compared to CRC-32 used in "gzip" and thus is faster.
//
// But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
// raw DEFLATE data only, without the mentioned zlib wrapper.
// Because of this major confusion, most modern browsers try it
// both ways, first looking for zlib headers.
// Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
//
// The list of browsers having problems is quite big, see:
// http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
// https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
//
// That's why we prefer gzip over deflate. It's just more reliable
// and not significantly slower than deflate.
c.SetEncoder("deflate", encoderDeflate)
// TODO: Exception for old MSIE browsers that can't handle non-HTML?
// https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
c.SetEncoder("gzip", encoderGzip)
// NOTE: Not implemented, intentionally:
// case "compress": // LZW. Deprecated.
// case "bzip2": // Too slow on-the-fly.
// case "zopfli": // Too slow on-the-fly.
// case "xz": // Too slow on-the-fly.
return c
}
// SetEncoder can be used to set the implementation of a compression algorithm.
//
// The encoding should be a standardized identifier. See:
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
//
// For example, add the Brotli algorithm:
//
// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
//
// compressor := middleware.NewCompressor(5, "text/html")
// compressor.SetEncoder("br", func(w io.Writer, level int) io.Writer {
// params := brotli_enc.NewBrotliParams()
// params.SetQuality(level)
// return brotli_enc.NewBrotliWriter(params, w)
// })
func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
encoding = strings.ToLower(encoding)
if encoding == "" {
panic("the encoding can not be empty")
}
if fn == nil {
panic("attempted to set a nil encoder function")
}
// If we are adding a new encoder that is already registered, we have to
// clear that one out first.
delete(c.pooledEncoders, encoding)
delete(c.encoders, encoding)
// If the encoder supports resetting (ioResetterWriter), then it can be pooled.
encoder := fn(io.Discard, c.level)
if _, ok := encoder.(ioResetterWriter); ok {
pool := &sync.Pool{
New: func() interface{} {
return fn(io.Discard, c.level)
},
}
c.pooledEncoders[encoding] = pool
}
// If the encoder is not in the pooledEncoders, add it to the normal encoders.
if _, ok := c.pooledEncoders[encoding]; !ok {
c.encoders[encoding] = fn
}
for i, v := range c.encodingPrecedence {
if v == encoding {
c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
}
}
c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
}
// ServeHTTP returns the response from the orig file system, compressed if possible.
func (c *Compressor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
encoding := c.selectEncoder(r.Header)
// we can only serve a cached response if all of the following hold:
// 1. they requested an encoding we support
// 2. they are requesting the whole file, not a range
// 3. the method is GET
if encoding == "" || r.Header.Get("Range") != "" || r.Method != "GET" {
http.FileServer(c.orig).ServeHTTP(w, r)
return
}
// Whether we should serve a cached response also depends in a fairly complex way on the path and request
// headers. In particular, we don't need a cached response for non-existing files/directories, and should not serve
// a cached response if the correct Etag for the file is provided. This logic is all handled by the http.FileServer,
// and we don't want to reimplement it here. So, what we'll do is send a HEAD request to the http.FileServer to see
// what it would do.
headReq := r.Clone(r.Context())
headReq.Method = http.MethodHead
headRW := &compressResponseWriter{
w: io.Discard,
headers: make(http.Header),
}
// deep-copy the headers already set on the response. This includes things like ETags.
for key, values := range w.Header() {
for _, value := range values {
headRW.headers.Add(key, value)
}
}
http.FileServer(c.orig).ServeHTTP(headRW, headReq)
if headRW.code != http.StatusOK {
// again, fall back to the file server. This is often a 404 Not Found, or a 304 Not Modified if they provided
// the correct ETag.
http.FileServer(c.orig).ServeHTTP(w, r)
return
}
cref := c.getRef(encoding, r)
c.serveRef(w, r, headRW.headers, cref)
}
func (c *Compressor) serveRef(w http.ResponseWriter, r *http.Request, headers http.Header, cref ref) {
select {
case <-r.Context().Done():
w.WriteHeader(http.StatusServiceUnavailable)
return
case <-cref.done:
cachePath := cref.key.filePath(c.cacheDir)
cacheFile, err := os.Open(cachePath)
if err != nil {
c.logger.Error(context.Background(), "failed to open compressed cache file",
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
// fall back to uncompressed
http.FileServer(c.orig).ServeHTTP(w, r)
return
}
defer cacheFile.Close()
// we need to remove or modify any Content-Length set by the FileServer,
// because it reflects the uncompressed size and would be wrong here.
info, err := cacheFile.Stat()
if err != nil {
c.logger.Error(context.Background(), "failed to stat compressed cache file",
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
headers.Del("Content-Length")
} else {
headers.Set("Content-Length", fmt.Sprintf("%d", info.Size()))
}
for key, values := range headers {
for _, value := range values {
w.Header().Add(key, value)
}
}
w.Header().Set("Content-Encoding", cref.key.encoding)
w.Header().Add("Vary", "Accept-Encoding")
w.WriteHeader(http.StatusOK)
_, err = io.Copy(w, cacheFile)
if err != nil {
// most commonly, the writer will hang up before we are done.
c.logger.Debug(context.Background(), "failed to write compressed cache file", slog.Error(err))
}
return
case <-cref.err:
// fall back to uncompressed
http.FileServer(c.orig).ServeHTTP(w, r)
return
}
}
func (c *Compressor) getRef(encoding string, r *http.Request) ref {
ck := getCacheKey(encoding, r)
c.mu.Lock()
defer c.mu.Unlock()
cref, ok := c.cache[ck]
if ok {
return cref
}
// we are the first to encode
cref = ref{
key: ck,
done: make(chan struct{}),
err: make(chan error),
}
c.cache[ck] = cref
go c.compress(context.Background(), encoding, cref, r)
return cref
}
func (c *Compressor) compress(ctx context.Context, encoding string, cref ref, r *http.Request) {
cachePath := cref.key.filePath(c.cacheDir)
var err error
// we want to handle closing either cref.done or cref.err in a defer at the bottom of the stack so that the encoder
// and cache file are both closed first (higher in the defer stack). This prevents data races where waiting HTTP
// handlers start reading the file before all the data has been flushed.
defer func() {
if err != nil {
if rErr := os.Remove(cachePath); rErr != nil {
// nolint: gocritic // best effort, just debug log any errors
c.logger.Debug(ctx, "failed to remove cache file",
slog.F("main_err", err), slog.F("remove_err", rErr), slog.F("cache_path", cachePath))
}
c.mu.Lock()
delete(c.cache, cref.key)
c.mu.Unlock()
close(cref.err)
return
}
close(cref.done)
}()
cacheDir := filepath.Dir(cachePath)
err = os.MkdirAll(cacheDir, 0o700)
if err != nil {
c.logger.Error(ctx, "failed to create cache directory", slog.F("cache_dir", cacheDir))
return
}
// We will truncate and overwrite any existing files. This is important in the case that we get restarted
// with the same cache dir, possibly with different source files.
cacheFile, err := os.OpenFile(cachePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
c.logger.Error(ctx, "failed to open compression cache file",
slog.F("path", cachePath), slog.Error(err))
return
}
defer cacheFile.Close()
encoder, cleanup := c.getEncoder(encoding, cacheFile)
if encoder == nil {
// can only hit this if there is a programming error
c.logger.Critical(ctx, "got nil encoder", slog.F("encoding", encoding))
err = xerrors.New("nil encoder")
return
}
defer cleanup()
defer encoder.Close() // ensures we flush, needs to be called before cleanup(), so we defer after it.
cw := &compressResponseWriter{
w: encoder,
headers: make(http.Header), // ignored
}
http.FileServer(c.orig).ServeHTTP(cw, r)
if cw.code != http.StatusOK {
// log at debug because this is likely just a 404
c.logger.Debug(ctx, "file server failed to serve",
slog.F("encoding", encoding), slog.F("url_path", cref.key.urlPath), slog.F("http_code", cw.code))
// mark the error so that we clean up correctly
err = xerrors.New("file server failed to serve")
return
}
// success!
}
// selectEncoder returns the name of the first encoder, in precedence order, that the request accepts, or the empty string if none match.
func (c *Compressor) selectEncoder(h http.Header) string {
header := h.Get("Accept-Encoding")
// Parse the names of all accepted algorithms from the header.
accepted := strings.Split(strings.ToLower(header), ",")
// Find the first supported encoder, in precedence order, that appears in the accepted list
for _, name := range c.encodingPrecedence {
if matchAcceptEncoding(accepted, name) {
return name
}
}
// No encoder found to match the accepted encoding
return ""
}
// getEncoder returns a writer that encodes and writes to the provided writer, and a cleanup func.
func (c *Compressor) getEncoder(name string, w io.Writer) (io.WriteCloser, func()) {
if pool, ok := c.pooledEncoders[name]; ok {
encoder, typeOK := pool.Get().(ioResetterWriter)
if !typeOK {
return nil, nil
}
cleanup := func() {
pool.Put(encoder)
}
encoder.Reset(w)
return encoder, cleanup
}
if fn, ok := c.encoders[name]; ok {
return fn(w, c.level), func() {}
}
return nil, nil
}
func matchAcceptEncoding(accepted []string, encoding string) bool {
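// NOTE: this is a plain substring match: q-values are ignored, so an
// Accept-Encoding of "gzip;q=0" still selects gzip.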
for _, v := range accepted {
if strings.Contains(v, encoding) {
return true
}
}
return false
}
// An EncoderFunc is a function that wraps the provided io.Writer with a
// streaming compression algorithm and returns it.
//
// In case of failure, the function should return nil.
type EncoderFunc func(w io.Writer, level int) io.WriteCloser
// Interface for types that allow resetting io.Writers.
type ioResetterWriter interface {
io.WriteCloser
Reset(w io.Writer)
}
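// Both *gzip.Writer and *flate.Writer implement Reset(io.Writer), so the
// default gzip and deflate encoders below are pooled by SetEncoder.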
func encoderGzip(w io.Writer, level int) io.WriteCloser {
gw, err := gzip.NewWriterLevel(w, level)
if err != nil {
return nil
}
return gw
}
func encoderDeflate(w io.Writer, level int) io.WriteCloser {
dw, err := flate.NewWriter(w, level)
if err != nil {
return nil
}
return dw
}
type compressResponseWriter struct {
w io.Writer
headers http.Header
code int
}
func (cw *compressResponseWriter) Header() http.Header {
return cw.headers
}
func (cw *compressResponseWriter) WriteHeader(code int) {
cw.code = code
}
func (cw *compressResponseWriter) Write(p []byte) (int, error) {
if cw.code == 0 {
cw.code = http.StatusOK
}
return cw.w.Write(p)
}
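// Note: compressResponseWriter implements only the basic
// http.ResponseWriter surface, which suffices here: the HEAD probe
// discards its body and the compressor streams into a cache file.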
@@ -1,227 +0,0 @@
package cachecompress
import (
"bytes"
"compress/flate"
"compress/gzip"
"context"
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/testutil"
)
func TestCompressorEncodings(t *testing.T) {
t.Parallel()
tests := []struct {
name string
path string
expectedEncoding string
acceptedEncodings []string
}{
{
name: "no expected encodings due to no accepted encodings",
path: "/file.html",
acceptedEncodings: nil,
expectedEncoding: "",
},
{
name: "gzip is only encoding",
path: "/file.html",
acceptedEncodings: []string{"gzip"},
expectedEncoding: "gzip",
},
{
name: "gzip is preferred over deflate",
path: "/file.html",
acceptedEncodings: []string{"gzip", "deflate"},
expectedEncoding: "gzip",
},
{
name: "deflate is used",
path: "/file.html",
acceptedEncodings: []string{"deflate"},
expectedEncoding: "deflate",
},
{
name: "nop is preferred",
path: "/file.html",
acceptedEncodings: []string{"nop, gzip, deflate"},
expectedEncoding: "nop",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
logger := testutil.Logger(t)
tempDir := t.TempDir()
cacheDir := filepath.Join(tempDir, "cache")
err := os.MkdirAll(cacheDir, 0o700)
require.NoError(t, err)
srcDir := filepath.Join(tempDir, "src")
err = os.MkdirAll(srcDir, 0o700)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
require.NoError(t, err)
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
if len(compressor.encoders) != 0 || len(compressor.pooledEncoders) != 2 {
t.Errorf("gzip and deflate should be pooled")
}
logger.Debug(context.Background(), "started compressor")
compressor.SetEncoder("nop", func(w io.Writer, _ int) io.WriteCloser {
return nopEncoder{w}
})
if len(compressor.encoders) != 1 {
t.Errorf("nop encoder should be stored in the encoders map")
}
ts := httptest.NewServer(compressor)
defer ts.Close()
ctx := context.Background()
header, respString := testRequestWithAcceptedEncodings(ctx, t, ts, "GET", tc.path, tc.acceptedEncodings...)
if respString != "textstring" {
t.Errorf("response text doesn't match; expected:%q, got:%q", "textstring", respString)
}
if got := header.Get("Content-Encoding"); got != tc.expectedEncoding {
t.Errorf("expected encoding %q but got %q", tc.expectedEncoding, got)
}
})
}
}
func testRequestWithAcceptedEncodings(ctx context.Context, t *testing.T, ts *httptest.Server, method, path string, encodings ...string) (http.Header, string) {
req, err := http.NewRequestWithContext(ctx, method, ts.URL+path, nil)
if err != nil {
t.Fatal(err)
return nil, ""
}
if len(encodings) > 0 {
encodingsString := strings.Join(encodings, ",")
req.Header.Set("Accept-Encoding", encodingsString)
}
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.DisableCompression = true // prevent automatically setting gzip
resp, err := (&http.Client{Transport: transport}).Do(req)
require.NoError(t, err)
respBody := decodeResponseBody(t, resp)
defer resp.Body.Close()
return resp.Header, respBody
}
func decodeResponseBody(t *testing.T, resp *http.Response) string {
var reader io.ReadCloser
t.Logf("encoding: '%s'", resp.Header.Get("Content-Encoding"))
rawBody, err := io.ReadAll(resp.Body)
require.NoError(t, err)
t.Logf("raw body: %x", rawBody)
switch resp.Header.Get("Content-Encoding") {
case "gzip":
var err error
reader, err = gzip.NewReader(bytes.NewReader(rawBody))
require.NoError(t, err)
case "deflate":
reader = flate.NewReader(bytes.NewReader(rawBody))
default:
return string(rawBody)
}
respBody, err := io.ReadAll(reader)
require.NoError(t, err, "failed to read response body: %T %+v", err, err)
err = reader.Close()
require.NoError(t, err)
return string(respBody)
}
type nopEncoder struct {
io.Writer
}
func (nopEncoder) Close() error { return nil }
// nolint: tparallel // we want to assert the state of the cache, so run synchronously
func TestCompressorHeadings(t *testing.T) {
t.Parallel()
logger := testutil.Logger(t)
tempDir := t.TempDir()
cacheDir := filepath.Join(tempDir, "cache")
err := os.MkdirAll(cacheDir, 0o700)
require.NoError(t, err)
srcDir := filepath.Join(tempDir, "src")
err = os.MkdirAll(srcDir, 0o700)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
require.NoError(t, err)
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
ts := httptest.NewServer(compressor)
defer ts.Close()
tests := []struct {
name string
path string
}{
{
name: "exists",
path: "/file.html",
},
{
name: "not found",
path: "/missing.html",
},
{
name: "not found directory",
path: "/a_directory/",
},
}
// nolint: paralleltest // we want to assert the state of the cache, so run synchronously
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitShort)
req := httptest.NewRequestWithContext(ctx, "GET", tc.path, nil)
// request directly from http.FileServer as our baseline response
respROrig := httptest.NewRecorder()
http.FileServer(http.Dir(srcDir)).ServeHTTP(respROrig, req)
respOrig := respROrig.Result()
req.Header.Add("Accept-Encoding", "gzip")
// serve twice so that we go thru cache hit and cache miss code
for range 2 {
respRec := httptest.NewRecorder()
compressor.ServeHTTP(respRec, req)
respComp := respRec.Result()
require.Equal(t, respOrig.StatusCode, respComp.StatusCode)
for key, values := range respOrig.Header {
if key == "Content-Length" {
continue // we don't get length on compressed responses
}
require.Equal(t, values, respComp.Header[key])
}
}
})
}
// only the successful request should leave a file in the cache; the
// failed lookups are removed once compression fails
files, err := os.ReadDir(filepath.Join(cacheDir, "gzip"))
require.NoError(t, err)
require.Len(t, files, 1)
}

Some files were not shown because too many files have changed in this diff.