Compare commits
167 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 1f3e65e6f0 | |||
| 3d411ddf4c | |||
| 26d029022d | |||
| 2a5d86e2aa | |||
| eef18424e3 | |||
| 926369b9f2 | |||
| e17b445e55 | |||
| dc5b877f26 | |||
| cb5ddec5c5 | |||
| eb020611a3 | |||
| 697b3a0a06 | |||
| acd6fe7aeb | |||
| 0b214ad7f6 | |||
| 17438d9730 | |||
| 279288affe | |||
| c571995a42 | |||
| bea2f8633a | |||
| dc9166b4cd | |||
| db22227f08 | |||
| 0eb8e904a1 | |||
| 2734123ac2 | |||
| 37432aefa6 | |||
| cf746f3a87 | |||
| ea533aa522 | |||
| 979df63788 | |||
| c4e9749146 | |||
| fb785d3524 | |||
| a899fc57a6 | |||
| 77e2521fa0 | |||
| c627a68e96 | |||
| 7ae3fdc749 | |||
| 7b6e72438b | |||
| 8f78baddb1 | |||
| 0f8f67ec6f | |||
| 9298e7e073 | |||
| 7182c53df7 | |||
| 37222199c3 | |||
| 9c47733e16 | |||
| 139dab7cfe | |||
| d306a2d7e5 | |||
| 30d2fc8bfc | |||
| d80b5fc8ed | |||
| 197b422a31 | |||
| 38017010ce | |||
| 984a834e81 | |||
| 2bcf08457b | |||
| 73dedcc765 | |||
| 94f6e83cfa | |||
| bc0c4ebaa7 | |||
| dc277618ee | |||
| b90c74a94d | |||
| ff532d9bf3 | |||
| 54497f4f6b | |||
| 9629d873fb | |||
| 643fe38b1e | |||
| c827a08c11 | |||
| 1b6556c2f6 | |||
| 859e94d67a | |||
| 50749d131b | |||
| 9986dc0c38 | |||
| 92b63871ca | |||
| 303e9ef7de | |||
| 1ebc217624 | |||
| 06dbadab11 | |||
| 566146af72 | |||
| 7e8fcb4b0f | |||
| dd28eef5b4 | |||
| 2f886ce8d0 | |||
| dcfd6d6f73 | |||
| b20fd6f2c1 | |||
| 2294c55bd9 | |||
| aad1b401c1 | |||
| a8294872a3 | |||
| 95a1ca898f | |||
| c3e3bb58f2 | |||
| 0d765f56f7 | |||
| 8b6f55c312 | |||
| 40fc337659 | |||
| f6df4c0ed8 | |||
| 924afb753f | |||
| 45c43d4ec4 | |||
| a1e7e105a4 | |||
| cf93c34172 | |||
| 659f89e079 | |||
| e4e4669feb | |||
| a1fa58ac17 | |||
| 88b7372e7f | |||
| dec6d310a8 | |||
| e720afa9d0 | |||
| d18441debe | |||
| 4f7b279fd8 | |||
| c3cbd977f1 | |||
| d8b1ca70d6 | |||
| 5a3ceb38f0 | |||
| cadf1352b4 | |||
| ed3d6fa9e3 | |||
| d9c40d61c2 | |||
| 90b64c5e04 | |||
| d0fb4599f0 | |||
| ffe22a0ffc | |||
| a1161b79a7 | |||
| dd92fbc83c | |||
| 10d2844cce | |||
| 277b2d21ca | |||
| af3ff825a1 | |||
| 50ba223aa1 | |||
| 6318520501 | |||
| f3f83540df | |||
| 10ef5933f4 | |||
| c8538769a2 | |||
| 5743149396 | |||
| 9780d0295c | |||
| b89093031e | |||
| 87045fc27b | |||
| b8a0f97cab | |||
| 7bad7e35ae | |||
| cd0a2849d0 | |||
| f6e86c6fdb | |||
| c301a0d804 | |||
| 6c621364f8 | |||
| 51d3abb904 | |||
| c6e551f538 | |||
| f684831f56 | |||
| f947a34103 | |||
| fb9d8e3030 | |||
| e60112e54f | |||
| e8e31dcb2c | |||
| 40e1784846 | |||
| 5a31c590e6 | |||
| e13a34c145 | |||
| 33b42fca7a | |||
| 86ef3fb497 | |||
| 13ca9ead3a | |||
| 906149317d | |||
| 6187acff8a | |||
| a106d67c07 | |||
| 2c6cbf15e2 | |||
| 1cb2ac65e5 | |||
| c6f63990cf | |||
| 9855460524 | |||
| 79728c30fa | |||
| 8daf4f35b1 | |||
| 5c802c2627 | |||
| 0f342ecc04 | |||
| e62c5db678 | |||
| 4244b20823 | |||
| 70cc3dd14a | |||
| d455f6ea2b | |||
| 4bd7c7b7e0 | |||
| 5f97ad0988 | |||
| 48f77d0c01 | |||
| da31a4bed9 | |||
| 9730c86f17 | |||
| 5ecab7b5f0 | |||
| df3b1bb6c7 | |||
| caeca1097b | |||
| 823b14aa34 | |||
| f2a410566c | |||
| aa689cbb39 | |||
| 1230cacf78 | |||
| 7bbeef4999 | |||
| f64ac8f5f7 | |||
| 69c2c40512 | |||
| 9da60a9dc5 | |||
| e73f9d356b | |||
| 87ce021035 | |||
| 86f0f39863 |
@@ -91,6 +91,9 @@
|
||||
|
||||
## Systematic Debugging Approach
|
||||
|
||||
YOU MUST ALWAYS find the root cause of any issue you are debugging
|
||||
YOU MUST NEVER fix a symptom or add a workaround instead of finding a root cause, even if it is faster.
|
||||
|
||||
### Multi-Issue Problem Solving
|
||||
|
||||
When facing multiple failing tests or complex integration issues:
|
||||
@@ -98,16 +101,21 @@ When facing multiple failing tests or complex integration issues:
|
||||
1. **Identify Root Causes**:
|
||||
- Run failing tests individually to isolate issues
|
||||
- Use LSP tools to trace through call chains
|
||||
- Check both compilation and runtime errors
|
||||
- Read Error Messages Carefully: Check both compilation and runtime errors
|
||||
- Reproduce Consistently: Ensure you can reliably reproduce the issue before investigating
|
||||
- Check Recent Changes: What changed that could have caused this? Git diff, recent commits, etc.
|
||||
- When You Don't Know: Say "I don't understand X" rather than pretending to know
|
||||
|
||||
2. **Fix in Logical Order**:
|
||||
- Address compilation issues first (imports, syntax)
|
||||
- Fix authorization and RBAC issues next
|
||||
- Resolve business logic and validation issues
|
||||
- Handle edge cases and race conditions last
|
||||
- IF your first fix doesn't work, STOP and re-analyze rather than adding more fixes
|
||||
|
||||
3. **Verification Strategy**:
|
||||
- Test each fix individually before moving to next issue
|
||||
- Always Test each fix individually before moving to next issue
|
||||
- Verify Before Continuing: Did your test work? If not, form new hypothesis - don't add more fixes
|
||||
- Use `make lint` and `make gen` after database changes
|
||||
- Verify RFC compliance with actual specifications
|
||||
- Run comprehensive test suites before considering complete
|
||||
|
||||
@@ -40,11 +40,15 @@
|
||||
- Use proper error types
|
||||
- Pattern: `xerrors.Errorf("failed to X: %w", err)`
|
||||
|
||||
### Naming Conventions
|
||||
## Naming Conventions
|
||||
|
||||
- Use clear, descriptive names
|
||||
- Abbreviate only when obvious
|
||||
- Names MUST tell what code does, not how it's implemented or its history
|
||||
- Follow Go and TypeScript naming conventions
|
||||
- When changing code, never document the old behavior or the behavior change
|
||||
- NEVER use implementation details in names (e.g., "ZodValidator", "MCPWrapper", "JSONParser")
|
||||
- NEVER use temporal/historical context in names (e.g., "LegacyHandler", "UnifiedTool", "ImprovedInterface", "EnhancedParser")
|
||||
- NEVER use pattern names unless they add clarity (e.g., prefer "Tool" over "ToolFactory")
|
||||
- Abbreviate only when obvious
|
||||
|
||||
### Comments
|
||||
|
||||
|
||||
@@ -5,6 +5,13 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
with:
|
||||
sqlc-version: "1.27.0"
|
||||
# uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
# with:
|
||||
# sqlc-version: "1.30.0"
|
||||
|
||||
# Switched to coder/sqlc fork to fix ambiguous column bug, see:
|
||||
# - https://github.com/coder/sqlc/pull/1
|
||||
# - https://github.com/sqlc-dev/sqlc/pull/4159
|
||||
shell: bash
|
||||
run: |
|
||||
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.13.0
|
||||
terraform_version: 1.13.4
|
||||
terraform_wrapper: false
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
app = "sao-paulo-coder"
|
||||
primary_region = "gru"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
# Ref: https://fly.io/docs/reference/configuration/#http_service-concurrency
|
||||
[http_service.concurrency]
|
||||
type = "requests"
|
||||
soft_limit = 50
|
||||
hard_limit = 100
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
+16
-28
@@ -181,7 +181,7 @@ jobs:
|
||||
echo "LINT_CACHE_DIR=$dir" >> "$GITHUB_ENV"
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
@@ -191,7 +191,7 @@ jobs:
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@85f62a8a84f939ae994ab3763f01a0296d61a7ee # v1.36.2
|
||||
uses: crate-ci/typos@80c8a4945eec0f6d464eaf9e65ed98ef085283d1 # v1.38.1
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
@@ -230,7 +230,7 @@ jobs:
|
||||
shell: bash
|
||||
|
||||
gen:
|
||||
timeout-minutes: 8
|
||||
timeout-minutes: 20
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
@@ -271,6 +271,7 @@ jobs:
|
||||
popd
|
||||
|
||||
- name: make gen
|
||||
timeout-minutes: 8
|
||||
run: |
|
||||
# Remove golden files to detect discrepancy in generated files.
|
||||
make clean/golden-files
|
||||
@@ -288,7 +289,7 @@ jobs:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 7
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
@@ -315,6 +316,7 @@ jobs:
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
|
||||
- name: make fmt
|
||||
timeout-minutes: 7
|
||||
run: |
|
||||
PATH="${PATH}:$(go env GOPATH)/bin" \
|
||||
make --output-sync -j -B fmt
|
||||
@@ -376,13 +378,6 @@ jobs:
|
||||
id: go-paths
|
||||
uses: ./.github/actions/setup-go-paths
|
||||
|
||||
- name: Download Go Build Cache
|
||||
id: download-go-build-cache
|
||||
uses: ./.github/actions/test-cache/download
|
||||
with:
|
||||
key-prefix: test-go-build-${{ runner.os }}-${{ runner.arch }}
|
||||
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
@@ -390,8 +385,7 @@ jobs:
|
||||
# download the toolchain configured in go.mod, so we don't
|
||||
# need to reinstall it. It's faster on Windows runners.
|
||||
use-preinstalled-go: ${{ runner.os == 'Windows' }}
|
||||
# Cache is already downloaded above
|
||||
use-cache: false
|
||||
use-cache: true
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
@@ -500,17 +494,11 @@ jobs:
|
||||
make test
|
||||
|
||||
- name: Upload failed test db dumps
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: failed-test-db-dump-${{matrix.os}}
|
||||
path: "**/*.test.sql"
|
||||
|
||||
- name: Upload Go Build Cache
|
||||
uses: ./.github/actions/test-cache/upload
|
||||
with:
|
||||
cache-key: ${{ steps.download-go-build-cache.outputs.cache-key }}
|
||||
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
|
||||
|
||||
- name: Upload Test Cache
|
||||
uses: ./.github/actions/test-cache/upload
|
||||
with:
|
||||
@@ -762,7 +750,7 @@ jobs:
|
||||
|
||||
- name: Upload Playwright Failed Tests
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: failed-test-videos${{ matrix.variant.premium && '-premium' || '' }}
|
||||
path: ./site/test-results/**/*.webm
|
||||
@@ -770,7 +758,7 @@ jobs:
|
||||
|
||||
- name: Upload pprof dumps
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: debug-pprof-dumps${{ matrix.variant.premium && '-premium' || '' }}
|
||||
path: ./site/test-results/**/debug-pprof-*.txt
|
||||
@@ -806,7 +794,7 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@20c7e42e1b2f6becd5d188df9acb02f3e2f51519 # v13.2.0
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -838,7 +826,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@20c7e42e1b2f6becd5d188df9acb02f3e2f51519 # v13.2.0
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -1036,7 +1024,7 @@ jobs:
|
||||
|
||||
- name: Upload build artifacts
|
||||
if: ${{ github.repository_owner == 'coder' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')) }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: |
|
||||
@@ -1123,7 +1111,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -1201,7 +1189,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Download dylibs
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: ./build
|
||||
@@ -1468,7 +1456,7 @@ jobs:
|
||||
|
||||
- name: Upload build artifacts
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -92,7 +92,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Set up Flux CLI
|
||||
uses: fluxcd/flux2/action@6bf37f6a560fd84982d67f853162e4b3c2235edb # v2.6.4
|
||||
uses: fluxcd/flux2/action@4a15fa6a023259353ef750acf1c98fe88407d4d0 # v2.7.2
|
||||
with:
|
||||
# Keep this and the github action up to date with the version of flux installed in dogfood cluster
|
||||
version: "2.7.0"
|
||||
@@ -163,12 +163,10 @@ jobs:
|
||||
run: |
|
||||
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
|
||||
flyctl deploy --image "$IMAGE" --app jnb-coder --config ./.github/fly-wsproxies/jnb-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_JNB" --yes
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
IMAGE: ${{ inputs.image }}
|
||||
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_JNB: ${{ secrets.FLY_JNB_CODER_PROXY_SESSION_TOKEN }}
|
||||
|
||||
@@ -48,7 +48,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -30,7 +30,7 @@ jobs:
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: tj-actions/changed-files@4563c729c555b4141fac99c80f699f571219b836 # v45.0.7
|
||||
- uses: tj-actions/changed-files@dbf178ceecb9304128c8e0648591d71208c6e2c9 # v45.0.7
|
||||
id: changed-files
|
||||
with:
|
||||
files: |
|
||||
|
||||
@@ -36,11 +36,11 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Nix
|
||||
uses: nixbuild/nix-quick-install-action@1f095fee853b33114486cfdeae62fa099cda35a9 # v33
|
||||
uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
with:
|
||||
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.4"
|
||||
nix_version: "2.28.5"
|
||||
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
@@ -82,7 +82,7 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
@@ -189,7 +189,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
|
||||
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -199,7 +199,7 @@ jobs:
|
||||
|
||||
- name: Comment on PR
|
||||
id: comment_id
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
|
||||
with:
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -248,7 +248,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -491,7 +491,7 @@ jobs:
|
||||
PASSWORD: ${{ steps.setup_deployment.outputs.password }}
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
|
||||
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ env.PR_NUMBER }}
|
||||
@@ -500,7 +500,7 @@ jobs:
|
||||
direction: last
|
||||
|
||||
- name: Comment on PR
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
|
||||
env:
|
||||
STATUS: ${{ needs.get_info.outputs.NEW == 'true' && 'Created' || 'Updated' }}
|
||||
with:
|
||||
|
||||
@@ -131,7 +131,7 @@ jobs:
|
||||
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: |
|
||||
@@ -239,7 +239,7 @@ jobs:
|
||||
cat "$CODER_RELEASE_NOTES_FILE"
|
||||
|
||||
- name: Docker Login
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -327,7 +327,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Download dylibs
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: ./build
|
||||
@@ -761,7 +761,7 @@ jobs:
|
||||
|
||||
- name: Upload artifacts to actions (if dry-run)
|
||||
if: ${{ inputs.dry_run }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: release-artifacts
|
||||
path: |
|
||||
@@ -777,7 +777,7 @@ jobs:
|
||||
|
||||
- name: Upload latest sbom artifact to actions (if dry-run)
|
||||
if: inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true'
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: latest-sbom-artifact
|
||||
path: ./coder_latest_sbom.spdx.json
|
||||
@@ -785,7 +785,7 @@ jobs:
|
||||
|
||||
- name: Send repository-dispatch event
|
||||
if: ${{ !inputs.dry_run }}
|
||||
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
|
||||
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
|
||||
with:
|
||||
token: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
repository: coder/packages
|
||||
|
||||
@@ -30,7 +30,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
|
||||
# Upload the results as artifacts.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
@@ -47,6 +47,6 @@ jobs:
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
|
||||
uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
|
||||
uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -154,13 +154,13 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
- name: Upload Trivy scan results as an artifact
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: trivy
|
||||
path: trivy-results.sarif
|
||||
|
||||
@@ -23,7 +23,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: stale
|
||||
uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0
|
||||
uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
@@ -125,7 +125,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Delete PR Cleanup workflow runs
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
|
||||
@@ -13,12 +13,12 @@ on:
|
||||
template_name:
|
||||
description: "Coder template to use for workspace"
|
||||
required: true
|
||||
default: "traiage"
|
||||
default: "coder"
|
||||
type: string
|
||||
template_preset:
|
||||
description: "Template preset to use"
|
||||
required: true
|
||||
default: "Default"
|
||||
default: "none"
|
||||
type: string
|
||||
prefix:
|
||||
description: "Prefix for workspace name"
|
||||
@@ -66,8 +66,8 @@ jobs:
|
||||
GITHUB_EVENT_USER_ID: ${{ github.event.sender.id }}
|
||||
GITHUB_EVENT_USER_LOGIN: ${{ github.event.sender.login }}
|
||||
INPUTS_ISSUE_URL: ${{ inputs.issue_url }}
|
||||
INPUTS_TEMPLATE_NAME: ${{ inputs.template_name || 'traiage' }}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || 'Default'}}
|
||||
INPUTS_TEMPLATE_NAME: ${{ inputs.template_name || 'coder' }}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || 'none'}}
|
||||
INPUTS_PREFIX: ${{ inputs.prefix || 'traiage' }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
@@ -168,7 +168,7 @@ jobs:
|
||||
echo "coder_username=${coder_username}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check Markdown links
|
||||
uses: umbrelladocs/action-linkspector@874d01cae9fd488e3077b08952093235bd626977 # v1.3.7
|
||||
uses: umbrelladocs/action-linkspector@652f85bc57bb1e7d4327260decc10aa68f7694c3 # v1.4.0
|
||||
id: markdown-link-check
|
||||
# checks all markdown files from /docs including all subfolders
|
||||
with:
|
||||
|
||||
@@ -12,6 +12,9 @@ node_modules/
|
||||
vendor/
|
||||
yarn-error.log
|
||||
|
||||
# Test output files
|
||||
test-output/
|
||||
|
||||
# VSCode settings.
|
||||
**/.vscode/*
|
||||
# Allow VSCode recommendations and default settings in project root.
|
||||
@@ -86,3 +89,5 @@ result
|
||||
__debug_bin*
|
||||
|
||||
**/.claude/settings.local.json
|
||||
|
||||
/.env
|
||||
|
||||
+11
-1
@@ -169,6 +169,16 @@ linters-settings:
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
- name: waitgroup-by-value
|
||||
usetesting:
|
||||
# Only os-setenv is enabled because we migrated to usetesting from another linter that
|
||||
# only covered os-setenv.
|
||||
os-setenv: true
|
||||
os-create-temp: false
|
||||
os-mkdir-temp: false
|
||||
os-temp-dir: false
|
||||
os-chdir: false
|
||||
context-background: false
|
||||
context-todo: false
|
||||
|
||||
# irrelevant as of Go v1.22: https://go.dev/blog/loopvar-preview
|
||||
govet:
|
||||
@@ -252,7 +262,6 @@ linters:
|
||||
# - wastedassign
|
||||
|
||||
- staticcheck
|
||||
- tenv
|
||||
# In Go, it's possible for a package to test it's internal functionality
|
||||
# without testing any exported functions. This is enabled to promote
|
||||
# decomposing a package before testing it's internals. A function caller
|
||||
@@ -265,4 +274,5 @@ linters:
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- usetesting
|
||||
- dupl
|
||||
|
||||
@@ -1,11 +1,41 @@
|
||||
# Coder Development Guidelines
|
||||
|
||||
You are an experienced, pragmatic software engineer. You don't over-engineer a solution when a simple one is possible.
|
||||
Rule #1: If you want exception to ANY rule, YOU MUST STOP and get explicit permission first. BREAKING THE LETTER OR SPIRIT OF THE RULES IS FAILURE.
|
||||
|
||||
## Foundational rules
|
||||
|
||||
- Doing it right is better than doing it fast. You are not in a rush. NEVER skip steps or take shortcuts.
|
||||
- Tedious, systematic work is often the correct solution. Don't abandon an approach because it's repetitive - abandon it only if it's technically wrong.
|
||||
- Honesty is a core value.
|
||||
|
||||
## Our relationship
|
||||
|
||||
- Act as a critical peer reviewer. Your job is to disagree with me when I'm wrong, not to please me. Prioritize accuracy and reasoning over agreement.
|
||||
- YOU MUST speak up immediately when you don't know something or we're in over our heads
|
||||
- YOU MUST call out bad ideas, unreasonable expectations, and mistakes - I depend on this
|
||||
- NEVER be agreeable just to be nice - I NEED your HONEST technical judgment
|
||||
- NEVER write the phrase "You're absolutely right!" You are not a sycophant. We're working together because I value your opinion. Do not agree with me unless you can justify it with evidence or reasoning.
|
||||
- YOU MUST ALWAYS STOP and ask for clarification rather than making assumptions.
|
||||
- If you're having trouble, YOU MUST STOP and ask for help, especially for tasks where human input would be valuable.
|
||||
- When you disagree with my approach, YOU MUST push back. Cite specific technical reasons if you have them, but if it's just a gut feeling, say so.
|
||||
- If you're uncomfortable pushing back out loud, just say "Houston, we have a problem". I'll know what you mean
|
||||
- We discuss architectutral decisions (framework changes, major refactoring, system design) together before implementation. Routine fixes and clear implementations don't need discussion.
|
||||
|
||||
## Proactiveness
|
||||
|
||||
When asked to do something, just do it - including obvious follow-up actions needed to complete the task properly.
|
||||
Only pause to ask for confirmation when:
|
||||
|
||||
- Multiple valid approaches exist and the choice matters
|
||||
- The action would delete or significantly restructure existing code
|
||||
- You genuinely don't understand what's being asked
|
||||
- Your partner asked a question (answer the question, don't jump to implementation)
|
||||
|
||||
@.claude/docs/WORKFLOWS.md
|
||||
@.cursorrules
|
||||
@README.md
|
||||
@package.json
|
||||
|
||||
## 🚀 Essential Commands
|
||||
## Essential Commands
|
||||
|
||||
| Task | Command | Notes |
|
||||
|-------------------|--------------------------|----------------------------------|
|
||||
@@ -21,22 +51,13 @@
|
||||
| **Format** | `make fmt` | Auto-format code |
|
||||
| **Clean** | `make clean` | Clean build artifacts |
|
||||
|
||||
### Frontend Commands (site directory)
|
||||
|
||||
- `pnpm build` - Build frontend
|
||||
- `pnpm dev` - Run development server
|
||||
- `pnpm check` - Run code checks
|
||||
- `pnpm format` - Format frontend code
|
||||
- `pnpm lint` - Lint frontend code
|
||||
- `pnpm test` - Run frontend tests
|
||||
|
||||
### Documentation Commands
|
||||
|
||||
- `pnpm run format-docs` - Format markdown tables in docs
|
||||
- `pnpm run lint-docs` - Lint and fix markdown files
|
||||
- `pnpm run storybook` - Run Storybook (from site directory)
|
||||
|
||||
## 🔧 Critical Patterns
|
||||
## Critical Patterns
|
||||
|
||||
### Database Changes (ALWAYS FOLLOW)
|
||||
|
||||
@@ -78,7 +99,7 @@ app, err := api.Database.GetOAuth2ProviderAppByClientID(dbauthz.AsSystemRestrict
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
```
|
||||
|
||||
## 📋 Quick Reference
|
||||
## Quick Reference
|
||||
|
||||
### Full workflows available in imported WORKFLOWS.md
|
||||
|
||||
@@ -88,14 +109,14 @@ app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
- [ ] Check if feature touches database - you'll need migrations
|
||||
- [ ] Check if feature touches audit logs - update `enterprise/audit/table.go`
|
||||
|
||||
## 🏗️ Architecture
|
||||
## Architecture
|
||||
|
||||
- **coderd**: Main API service
|
||||
- **provisionerd**: Infrastructure provisioning
|
||||
- **Agents**: Workspace services (SSH, port forwarding)
|
||||
- **Database**: PostgreSQL with `dbauthz` authorization
|
||||
|
||||
## 🧪 Testing
|
||||
## Testing
|
||||
|
||||
### Race Condition Prevention
|
||||
|
||||
@@ -112,21 +133,21 @@ app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
NEVER use `time.Sleep` to mitigate timing issues. If an issue
|
||||
seems like it should use `time.Sleep`, read through https://github.com/coder/quartz and specifically the [README](https://github.com/coder/quartz/blob/main/README.md) to better understand how to handle timing issues.
|
||||
|
||||
## 🎯 Code Style
|
||||
## Code Style
|
||||
|
||||
### Detailed guidelines in imported WORKFLOWS.md
|
||||
|
||||
- Follow [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md)
|
||||
- Commit format: `type(scope): message`
|
||||
|
||||
## 📚 Detailed Development Guides
|
||||
## Detailed Development Guides
|
||||
|
||||
@.claude/docs/OAUTH2.md
|
||||
@.claude/docs/TESTING.md
|
||||
@.claude/docs/TROUBLESHOOTING.md
|
||||
@.claude/docs/DATABASE.md
|
||||
|
||||
## 🚨 Common Pitfalls
|
||||
## Common Pitfalls
|
||||
|
||||
1. **Audit table errors** → Update `enterprise/audit/table.go`
|
||||
2. **OAuth2 errors** → Return RFC-compliant format
|
||||
|
||||
-12
@@ -18,18 +18,6 @@ coderd/rbac/ @Emyrk
|
||||
scripts/apitypings/ @Emyrk
|
||||
scripts/gensite/ @aslilac
|
||||
|
||||
site/ @aslilac @Parkreiner
|
||||
site/src/hooks/ @Parkreiner
|
||||
# These rules intentionally do not specify any owners. More specific rules
|
||||
# override less specific rules, so these files are "ignored" by the site/ rule.
|
||||
site/e2e/google/protobuf/timestampGenerated.ts
|
||||
site/e2e/provisionerGenerated.ts
|
||||
site/src/api/countriesGenerated.ts
|
||||
site/src/api/rbacresourcesGenerated.ts
|
||||
site/src/api/typesGenerated.ts
|
||||
site/src/testHelpers/entities.ts
|
||||
site/CLAUDE.md
|
||||
|
||||
# The blood and guts of the autostop algorithm, which is quite complex and
|
||||
# requires elite ball knowledge of most of the scheduling code to make changes
|
||||
# without inadvertently affecting other parts of the codebase.
|
||||
|
||||
@@ -636,8 +636,8 @@ TAILNETTEST_MOCKS := \
|
||||
tailnet/tailnettest/subscriptionmock.go
|
||||
|
||||
AIBRIDGED_MOCKS := \
|
||||
enterprise/x/aibridged/aibridgedmock/clientmock.go \
|
||||
enterprise/x/aibridged/aibridgedmock/poolmock.go
|
||||
enterprise/aibridged/aibridgedmock/clientmock.go \
|
||||
enterprise/aibridged/aibridgedmock/poolmock.go
|
||||
|
||||
GEN_FILES := \
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
@@ -645,7 +645,7 @@ GEN_FILES := \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
$(DB_GEN_FILES) \
|
||||
$(SITE_GEN_FILES) \
|
||||
coderd/rbac/object_gen.go \
|
||||
@@ -676,6 +676,7 @@ gen/db: $(DB_GEN_FILES)
|
||||
.PHONY: gen/db
|
||||
|
||||
gen/golden-files: \
|
||||
agent/unit/testdata/.gen-golden \
|
||||
cli/testdata/.gen-golden \
|
||||
coderd/.gen-golden \
|
||||
coderd/notifications/.gen-golden \
|
||||
@@ -696,7 +697,7 @@ gen/mark-fresh:
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
$(DB_GEN_FILES) \
|
||||
site/src/api/typesGenerated.ts \
|
||||
@@ -767,8 +768,8 @@ codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agen
|
||||
go generate ./codersdk/workspacesdk/agentconnmock/
|
||||
touch "$@"
|
||||
|
||||
$(AIBRIDGED_MOCKS): enterprise/x/aibridged/client.go enterprise/x/aibridged/pool.go
|
||||
go generate ./enterprise/x/aibridged/aibridgedmock/
|
||||
$(AIBRIDGED_MOCKS): enterprise/aibridged/client.go enterprise/aibridged/pool.go
|
||||
go generate ./enterprise/aibridged/aibridgedmock/
|
||||
touch "$@"
|
||||
|
||||
agent/agentcontainers/dcspec/dcspec_gen.go: \
|
||||
@@ -821,13 +822,13 @@ vpn/vpn.pb.go: vpn/vpn.proto
|
||||
--go_opt=paths=source_relative \
|
||||
./vpn/vpn.proto
|
||||
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go: enterprise/x/aibridged/proto/aibridged.proto
|
||||
enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./enterprise/x/aibridged/proto/aibridged.proto
|
||||
./enterprise/aibridged/proto/aibridged.proto
|
||||
|
||||
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
# -C sets the directory for the go run command
|
||||
@@ -952,6 +953,10 @@ clean/golden-files:
|
||||
-type f -name '*.golden' -delete
|
||||
.PHONY: clean/golden-files
|
||||
|
||||
agent/unit/testdata/.gen-golden: $(wildcard agent/unit/testdata/*.golden) $(GO_SRC_FILES) $(wildcard agent/unit/*_test.go)
|
||||
TZ=UTC go test ./agent/unit -run="TestGraph" -update
|
||||
touch "$@"
|
||||
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard cli/*_test.go)
|
||||
TZ=UTC go test ./cli -run="Test(CommandHelp|ServerYAML|ErrorExamples|.*Golden)" -update
|
||||
touch "$@"
|
||||
@@ -1177,3 +1182,8 @@ endif
|
||||
|
||||
dogfood/coder/nix.hash: flake.nix flake.lock
|
||||
sha256sum flake.nix flake.lock >./dogfood/coder/nix.hash
|
||||
|
||||
# Count the number of test databases created per test package.
|
||||
count-test-databases:
|
||||
PGPASSWORD=postgres psql -h localhost -U postgres -d coder_testing -P pager=off -c 'SELECT test_package, count(*) as count from test_databases GROUP BY test_package ORDER BY count DESC'
|
||||
.PHONY: count-test-databases
|
||||
|
||||
+76
-33
@@ -3462,11 +3462,7 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
registry := prometheus.NewRegistry()
|
||||
|
||||
//nolint:dogsled
|
||||
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{
|
||||
// Make sure we always get a DERP connection for
|
||||
// currently_reachable_peers.
|
||||
DisableDirectConnections: true,
|
||||
}, 0, func(_ *agenttest.Client, o *agent.Options) {
|
||||
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
|
||||
o.PrometheusRegistry = registry
|
||||
})
|
||||
|
||||
@@ -3481,16 +3477,31 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
err = session.Shell()
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []*proto.Stats_Metric{
|
||||
expected := []struct {
|
||||
Name string
|
||||
Type proto.Stats_Metric_Type
|
||||
CheckFn func(float64) error
|
||||
Labels []*proto.Stats_Metric_Label
|
||||
}{
|
||||
{
|
||||
Name: "agent_reconnecting_pty_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
Value: 0,
|
||||
Name: "agent_reconnecting_pty_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
CheckFn: func(v float64) error {
|
||||
if v == 0 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected 0, got %f", v)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "agent_sessions_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
Value: 1,
|
||||
Name: "agent_sessions_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
CheckFn: func(v float64) error {
|
||||
if v == 1 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected 1, got %f", v)
|
||||
},
|
||||
Labels: []*proto.Stats_Metric_Label{
|
||||
{
|
||||
Name: "magic_type",
|
||||
@@ -3503,24 +3514,44 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "agent_ssh_server_failed_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
Value: 0,
|
||||
Name: "agent_ssh_server_failed_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
CheckFn: func(v float64) error {
|
||||
if v == 0 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected 0, got %f", v)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "agent_ssh_server_sftp_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
Value: 0,
|
||||
Name: "agent_ssh_server_sftp_connections_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
CheckFn: func(v float64) error {
|
||||
if v == 0 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected 0, got %f", v)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "agent_ssh_server_sftp_server_errors_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
Value: 0,
|
||||
Name: "agent_ssh_server_sftp_server_errors_total",
|
||||
Type: proto.Stats_Metric_COUNTER,
|
||||
CheckFn: func(v float64) error {
|
||||
if v == 0 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected 0, got %f", v)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "coderd_agentstats_currently_reachable_peers",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
Value: 1,
|
||||
Name: "coderd_agentstats_currently_reachable_peers",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
CheckFn: func(float64) error {
|
||||
// We can't reliably ping a peer here, and networking is out of
|
||||
// scope of this test, so we just test that the metric exists
|
||||
// with the correct labels.
|
||||
return nil
|
||||
},
|
||||
Labels: []*proto.Stats_Metric_Label{
|
||||
{
|
||||
Name: "connection_type",
|
||||
@@ -3529,9 +3560,11 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "coderd_agentstats_currently_reachable_peers",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
Value: 0,
|
||||
Name: "coderd_agentstats_currently_reachable_peers",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
CheckFn: func(float64) error {
|
||||
return nil
|
||||
},
|
||||
Labels: []*proto.Stats_Metric_Label{
|
||||
{
|
||||
Name: "connection_type",
|
||||
@@ -3540,9 +3573,20 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "coderd_agentstats_startup_script_seconds",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
Value: 1,
|
||||
Name: "coderd_agentstats_startup_script_seconds",
|
||||
Type: proto.Stats_Metric_GAUGE,
|
||||
CheckFn: func(f float64) error {
|
||||
if f >= 0 {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("expected >= 0, got %f", f)
|
||||
},
|
||||
Labels: []*proto.Stats_Metric_Label{
|
||||
{
|
||||
Name: "success",
|
||||
Value: "true",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -3564,11 +3608,10 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
for _, m := range mf.GetMetric() {
|
||||
assert.Equal(t, expected[i].Name, mf.GetName())
|
||||
assert.Equal(t, expected[i].Type.String(), mf.GetType().String())
|
||||
// Value is max expected
|
||||
if expected[i].Type == proto.Stats_Metric_GAUGE {
|
||||
assert.GreaterOrEqualf(t, expected[i].Value, m.GetGauge().GetValue(), "expected %s to be greater than or equal to %f, got %f", expected[i].Name, expected[i].Value, m.GetGauge().GetValue())
|
||||
assert.NoError(t, expected[i].CheckFn(m.GetGauge().GetValue()), "check fn for %s failed", expected[i].Name)
|
||||
} else if expected[i].Type == proto.Stats_Metric_COUNTER {
|
||||
assert.GreaterOrEqualf(t, expected[i].Value, m.GetCounter().GetValue(), "expected %s to be greater than or equal to %f, got %f", expected[i].Name, expected[i].Value, m.GetCounter().GetValue())
|
||||
assert.NoError(t, expected[i].CheckFn(m.GetCounter().GetValue()), "check fn for %s failed", expected[i].Name)
|
||||
}
|
||||
for j, lbl := range expected[i].Labels {
|
||||
assert.Equal(t, m.GetLabel()[j], &promgo.LabelPair{
|
||||
|
||||
@@ -682,8 +682,6 @@ func (api *API) updaterLoop() {
|
||||
} else {
|
||||
prevErr = nil
|
||||
}
|
||||
default:
|
||||
api.logger.Debug(api.ctx, "updater loop ticker skipped, update in progress")
|
||||
}
|
||||
|
||||
return nil // Always nil to keep the ticker going.
|
||||
|
||||
+3
-1
@@ -250,7 +250,9 @@ func (a *agent) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
transforms[i] = replace.String(edit.Search, edit.Replace)
|
||||
}
|
||||
|
||||
tmpfile, err := afero.TempFile(a.filesystem, "", filepath.Base(path))
|
||||
// Create an adjacent file to ensure it will be on the same device and can be
|
||||
// moved atomically.
|
||||
tmpfile, err := afero.TempFile(a.filesystem, filepath.Dir(path), filepath.Base(path))
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
|
||||
@@ -0,0 +1,174 @@
|
||||
package unit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"gonum.org/v1/gonum/graph/encoding/dot"
|
||||
"gonum.org/v1/gonum/graph/simple"
|
||||
"gonum.org/v1/gonum/graph/topo"
|
||||
)
|
||||
|
||||
// Graph provides a bidirectional interface over gonum's directed graph implementation.
|
||||
// While the underlying gonum graph is directed, we overlay bidirectional semantics
|
||||
// by distinguishing between forward and reverse edges. Wanting and being wanted by
|
||||
// other units are related but different concepts that have different graph traversal
|
||||
// implications when Units update their status.
|
||||
//
|
||||
// The graph stores edge types to represent different relationships between units,
|
||||
// allowing for domain-specific semantics beyond simple connectivity.
|
||||
type Graph[EdgeType, VertexType comparable] struct {
|
||||
mu sync.RWMutex
|
||||
// The underlying gonum graph. It stores vertices and edges without knowing about the types of the vertices and edges.
|
||||
gonumGraph *simple.DirectedGraph
|
||||
// Maps vertices to their IDs so that a gonum vertex ID can be used to lookup the vertex type.
|
||||
vertexToID map[VertexType]int64
|
||||
// Maps vertex IDs to their types so that a vertex type can be used to lookup the gonum vertex ID.
|
||||
idToVertex map[int64]VertexType
|
||||
// The next ID to assign to a vertex.
|
||||
nextID int64
|
||||
// Store edge types by "fromID->toID" key. This is used to lookup the edge type for a given edge.
|
||||
edgeTypes map[string]EdgeType
|
||||
}
|
||||
|
||||
// Edge is a convenience type for representing an edge in the graph.
|
||||
// It encapsulates the from and to vertices and the edge type itself.
|
||||
type Edge[EdgeType, VertexType comparable] struct {
|
||||
From VertexType
|
||||
To VertexType
|
||||
Edge EdgeType
|
||||
}
|
||||
|
||||
// AddEdge adds an edge to the graph. It initializes the graph and metadata on first use,
|
||||
// checks for cycles, and adds the edge to the gonum graph.
|
||||
func (g *Graph[EdgeType, VertexType]) AddEdge(from, to VertexType, edge EdgeType) error {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
if g.gonumGraph == nil {
|
||||
g.gonumGraph = simple.NewDirectedGraph()
|
||||
g.vertexToID = make(map[VertexType]int64)
|
||||
g.idToVertex = make(map[int64]VertexType)
|
||||
g.edgeTypes = make(map[string]EdgeType)
|
||||
g.nextID = 1
|
||||
}
|
||||
|
||||
fromID := g.getOrCreateVertexID(from)
|
||||
toID := g.getOrCreateVertexID(to)
|
||||
|
||||
if g.canReach(to, from) {
|
||||
return xerrors.Errorf("adding edge (%v -> %v) would create a cycle", from, to)
|
||||
}
|
||||
|
||||
g.gonumGraph.SetEdge(simple.Edge{F: simple.Node(fromID), T: simple.Node(toID)})
|
||||
|
||||
edgeKey := fmt.Sprintf("%d->%d", fromID, toID)
|
||||
g.edgeTypes[edgeKey] = edge
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetForwardAdjacentVertices returns all the edges that originate from the given vertex.
|
||||
func (g *Graph[EdgeType, VertexType]) GetForwardAdjacentVertices(from VertexType) []Edge[EdgeType, VertexType] {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
fromID, exists := g.vertexToID[from]
|
||||
if !exists {
|
||||
return []Edge[EdgeType, VertexType]{}
|
||||
}
|
||||
|
||||
edges := []Edge[EdgeType, VertexType]{}
|
||||
toNodes := g.gonumGraph.From(fromID)
|
||||
for toNodes.Next() {
|
||||
toID := toNodes.Node().ID()
|
||||
to := g.idToVertex[toID]
|
||||
|
||||
// Get the edge type
|
||||
edgeKey := fmt.Sprintf("%d->%d", fromID, toID)
|
||||
edgeType := g.edgeTypes[edgeKey]
|
||||
|
||||
edges = append(edges, Edge[EdgeType, VertexType]{From: from, To: to, Edge: edgeType})
|
||||
}
|
||||
|
||||
return edges
|
||||
}
|
||||
|
||||
// GetReverseAdjacentVertices returns all the edges that terminate at the given vertex.
|
||||
func (g *Graph[EdgeType, VertexType]) GetReverseAdjacentVertices(to VertexType) []Edge[EdgeType, VertexType] {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
toID, exists := g.vertexToID[to]
|
||||
if !exists {
|
||||
return []Edge[EdgeType, VertexType]{}
|
||||
}
|
||||
|
||||
edges := []Edge[EdgeType, VertexType]{}
|
||||
fromNodes := g.gonumGraph.To(toID)
|
||||
for fromNodes.Next() {
|
||||
fromID := fromNodes.Node().ID()
|
||||
from := g.idToVertex[fromID]
|
||||
|
||||
// Get the edge type
|
||||
edgeKey := fmt.Sprintf("%d->%d", fromID, toID)
|
||||
edgeType := g.edgeTypes[edgeKey]
|
||||
|
||||
edges = append(edges, Edge[EdgeType, VertexType]{From: from, To: to, Edge: edgeType})
|
||||
}
|
||||
|
||||
return edges
|
||||
}
|
||||
|
||||
// getOrCreateVertexID returns the ID for a vertex, creating it if it doesn't exist.
|
||||
func (g *Graph[EdgeType, VertexType]) getOrCreateVertexID(vertex VertexType) int64 {
|
||||
if id, exists := g.vertexToID[vertex]; exists {
|
||||
return id
|
||||
}
|
||||
|
||||
id := g.nextID
|
||||
g.nextID++
|
||||
g.vertexToID[vertex] = id
|
||||
g.idToVertex[id] = vertex
|
||||
|
||||
// Add the node to the gonum graph
|
||||
g.gonumGraph.AddNode(simple.Node(id))
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
// canReach checks if there is a path from the start vertex to the end vertex.
|
||||
func (g *Graph[EdgeType, VertexType]) canReach(start, end VertexType) bool {
|
||||
if start == end {
|
||||
return true
|
||||
}
|
||||
|
||||
startID, startExists := g.vertexToID[start]
|
||||
endID, endExists := g.vertexToID[end]
|
||||
|
||||
if !startExists || !endExists {
|
||||
return false
|
||||
}
|
||||
|
||||
// Use gonum's built-in path existence check
|
||||
return topo.PathExistsIn(g.gonumGraph, simple.Node(startID), simple.Node(endID))
|
||||
}
|
||||
|
||||
// ToDOT exports the graph to DOT format for visualization
|
||||
func (g *Graph[EdgeType, VertexType]) ToDOT(name string) (string, error) {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
if g.gonumGraph == nil {
|
||||
return "", xerrors.New("graph is not initialized")
|
||||
}
|
||||
|
||||
// Marshal the graph to DOT format
|
||||
dotBytes, err := dot.Marshal(g.gonumGraph, name, "", " ")
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("failed to marshal graph to DOT: %w", err)
|
||||
}
|
||||
|
||||
return string(dotBytes), nil
|
||||
}
|
||||
@@ -0,0 +1,454 @@
|
||||
// Package unit_test provides tests for the unit package.
|
||||
//
|
||||
// DOT Graph Testing:
|
||||
// The graph tests use golden files for DOT representation verification.
|
||||
// To update the golden files:
|
||||
// make gen/golden-files
|
||||
//
|
||||
// The golden files contain the expected DOT representation and can be easily
|
||||
// inspected, version controlled, and updated when the graph structure changes.
|
||||
package unit_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/unit"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
)
|
||||
|
||||
type testGraphEdge string
|
||||
|
||||
const (
|
||||
testEdgeStarted testGraphEdge = "started"
|
||||
testEdgeCompleted testGraphEdge = "completed"
|
||||
)
|
||||
|
||||
type testGraphVertex struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
type (
|
||||
testGraph = unit.Graph[testGraphEdge, *testGraphVertex]
|
||||
testEdge = unit.Edge[testGraphEdge, *testGraphVertex]
|
||||
)
|
||||
|
||||
// randInt generates a random integer in the range [0, limit).
|
||||
func randInt(limit int) int {
|
||||
if limit <= 0 {
|
||||
return 0
|
||||
}
|
||||
n, err := cryptorand.Int63n(int64(limit))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
|
||||
// UpdateGoldenFiles indicates golden files should be updated.
|
||||
// To update the golden files:
|
||||
// make gen/golden-files
|
||||
var UpdateGoldenFiles = flag.Bool("update", false, "update .golden files")
|
||||
|
||||
// assertDOTGraph requires that the graph's DOT representation matches the golden file
|
||||
func assertDOTGraph(t *testing.T, graph *testGraph, goldenName string) {
|
||||
t.Helper()
|
||||
|
||||
dot, err := graph.ToDOT(goldenName)
|
||||
require.NoError(t, err)
|
||||
|
||||
goldenFile := filepath.Join("testdata", goldenName+".golden")
|
||||
if *UpdateGoldenFiles {
|
||||
t.Logf("update golden file for: %q: %s", goldenName, goldenFile)
|
||||
err := os.MkdirAll(filepath.Dir(goldenFile), 0o755)
|
||||
require.NoError(t, err, "want no error creating golden file directory")
|
||||
err = os.WriteFile(goldenFile, []byte(dot), 0o600)
|
||||
require.NoError(t, err, "update golden file")
|
||||
}
|
||||
|
||||
expected, err := os.ReadFile(goldenFile)
|
||||
require.NoError(t, err, "read golden file, run \"make gen/golden-files\" and commit the changes")
|
||||
|
||||
// Normalize line endings for cross-platform compatibility
|
||||
expected = normalizeLineEndings(expected)
|
||||
normalizedDot := normalizeLineEndings([]byte(dot))
|
||||
|
||||
assert.Empty(t, cmp.Diff(string(expected), string(normalizedDot)), "golden file mismatch (-want +got): %s, run \"make gen/golden-files\", verify and commit the changes", goldenFile)
|
||||
}
|
||||
|
||||
// normalizeLineEndings ensures that all line endings are normalized to \n.
|
||||
// Required for Windows compatibility.
|
||||
func normalizeLineEndings(content []byte) []byte {
|
||||
content = bytes.ReplaceAll(content, []byte("\r\n"), []byte("\n"))
|
||||
content = bytes.ReplaceAll(content, []byte("\r"), []byte("\n"))
|
||||
return content
|
||||
}
|
||||
|
||||
func TestGraph(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testFuncs := map[string]func(t *testing.T) *unit.Graph[testGraphEdge, *testGraphVertex]{
|
||||
"ForwardAndReverseEdges": func(t *testing.T) *unit.Graph[testGraphEdge, *testGraphVertex] {
|
||||
graph := &unit.Graph[testGraphEdge, *testGraphVertex]{}
|
||||
unit1 := &testGraphVertex{Name: "unit1"}
|
||||
unit2 := &testGraphVertex{Name: "unit2"}
|
||||
unit3 := &testGraphVertex{Name: "unit3"}
|
||||
err := graph.AddEdge(unit1, unit2, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
err = graph.AddEdge(unit1, unit3, testEdgeStarted)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check for forward edge
|
||||
vertices := graph.GetForwardAdjacentVertices(unit1)
|
||||
require.Len(t, vertices, 2)
|
||||
// Unit 1 depends on the completion of Unit2
|
||||
require.Contains(t, vertices, testEdge{
|
||||
From: unit1,
|
||||
To: unit2,
|
||||
Edge: testEdgeCompleted,
|
||||
})
|
||||
// Unit 1 depends on the start of Unit3
|
||||
require.Contains(t, vertices, testEdge{
|
||||
From: unit1,
|
||||
To: unit3,
|
||||
Edge: testEdgeStarted,
|
||||
})
|
||||
|
||||
// Check for reverse edges
|
||||
unit2ReverseEdges := graph.GetReverseAdjacentVertices(unit2)
|
||||
require.Len(t, unit2ReverseEdges, 1)
|
||||
// Unit 2 must be completed before Unit 1 can start
|
||||
require.Contains(t, unit2ReverseEdges, testEdge{
|
||||
From: unit1,
|
||||
To: unit2,
|
||||
Edge: testEdgeCompleted,
|
||||
})
|
||||
|
||||
unit3ReverseEdges := graph.GetReverseAdjacentVertices(unit3)
|
||||
require.Len(t, unit3ReverseEdges, 1)
|
||||
// Unit 3 must be started before Unit 1 can complete
|
||||
require.Contains(t, unit3ReverseEdges, testEdge{
|
||||
From: unit1,
|
||||
To: unit3,
|
||||
Edge: testEdgeStarted,
|
||||
})
|
||||
|
||||
return graph
|
||||
},
|
||||
"SelfReference": func(t *testing.T) *testGraph {
|
||||
graph := &testGraph{}
|
||||
unit1 := &testGraphVertex{Name: "unit1"}
|
||||
err := graph.AddEdge(unit1, unit1, testEdgeCompleted)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, fmt.Sprintf("adding edge (%v -> %v) would create a cycle", unit1, unit1))
|
||||
|
||||
return graph
|
||||
},
|
||||
"Cycle": func(t *testing.T) *testGraph {
|
||||
graph := &testGraph{}
|
||||
unit1 := &testGraphVertex{Name: "unit1"}
|
||||
unit2 := &testGraphVertex{Name: "unit2"}
|
||||
err := graph.AddEdge(unit1, unit2, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
err = graph.AddEdge(unit2, unit1, testEdgeStarted)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, fmt.Sprintf("adding edge (%v -> %v) would create a cycle", unit2, unit1))
|
||||
|
||||
return graph
|
||||
},
|
||||
"MultipleDependenciesSameStatus": func(t *testing.T) *testGraph {
|
||||
graph := &testGraph{}
|
||||
unit1 := &testGraphVertex{Name: "unit1"}
|
||||
unit2 := &testGraphVertex{Name: "unit2"}
|
||||
unit3 := &testGraphVertex{Name: "unit3"}
|
||||
unit4 := &testGraphVertex{Name: "unit4"}
|
||||
|
||||
// Unit1 depends on completion of both unit2 and unit3 (same status type)
|
||||
err := graph.AddEdge(unit1, unit2, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
err = graph.AddEdge(unit1, unit3, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Unit1 also depends on starting of unit4 (different status type)
|
||||
err = graph.AddEdge(unit1, unit4, testEdgeStarted)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that unit1 has 3 forward dependencies
|
||||
forwardEdges := graph.GetForwardAdjacentVertices(unit1)
|
||||
require.Len(t, forwardEdges, 3)
|
||||
|
||||
// Verify all expected dependencies exist
|
||||
expectedDependencies := []testEdge{
|
||||
{From: unit1, To: unit2, Edge: testEdgeCompleted},
|
||||
{From: unit1, To: unit3, Edge: testEdgeCompleted},
|
||||
{From: unit1, To: unit4, Edge: testEdgeStarted},
|
||||
}
|
||||
|
||||
for _, expected := range expectedDependencies {
|
||||
require.Contains(t, forwardEdges, expected)
|
||||
}
|
||||
|
||||
// Check reverse dependencies
|
||||
unit2ReverseEdges := graph.GetReverseAdjacentVertices(unit2)
|
||||
require.Len(t, unit2ReverseEdges, 1)
|
||||
require.Contains(t, unit2ReverseEdges, testEdge{
|
||||
From: unit1, To: unit2, Edge: testEdgeCompleted,
|
||||
})
|
||||
|
||||
unit3ReverseEdges := graph.GetReverseAdjacentVertices(unit3)
|
||||
require.Len(t, unit3ReverseEdges, 1)
|
||||
require.Contains(t, unit3ReverseEdges, testEdge{
|
||||
From: unit1, To: unit3, Edge: testEdgeCompleted,
|
||||
})
|
||||
|
||||
unit4ReverseEdges := graph.GetReverseAdjacentVertices(unit4)
|
||||
require.Len(t, unit4ReverseEdges, 1)
|
||||
require.Contains(t, unit4ReverseEdges, testEdge{
|
||||
From: unit1, To: unit4, Edge: testEdgeStarted,
|
||||
})
|
||||
|
||||
return graph
|
||||
},
|
||||
}
|
||||
|
||||
for testName, testFunc := range testFuncs {
|
||||
var graph *testGraph
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
graph = testFunc(t)
|
||||
assertDOTGraph(t, graph, testName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGraphThreadSafety(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("ConcurrentReadWrite", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
graph := &testGraph{}
|
||||
var wg sync.WaitGroup
|
||||
const numWriters = 50
|
||||
const numReaders = 100
|
||||
const operationsPerWriter = 1000
|
||||
const operationsPerReader = 2000
|
||||
|
||||
barrier := make(chan struct{})
|
||||
// Launch writers
|
||||
for i := 0; i < numWriters; i++ {
|
||||
wg.Add(1)
|
||||
go func(writerID int) {
|
||||
defer wg.Done()
|
||||
<-barrier
|
||||
for j := 0; j < operationsPerWriter; j++ {
|
||||
from := &testGraphVertex{Name: fmt.Sprintf("writer-%d-%d", writerID, j)}
|
||||
to := &testGraphVertex{Name: fmt.Sprintf("writer-%d-%d", writerID, j+1)}
|
||||
graph.AddEdge(from, to, testEdgeCompleted)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Launch readers
|
||||
readerResults := make([]struct {
|
||||
panicked bool
|
||||
readCount int
|
||||
}, numReaders)
|
||||
|
||||
for i := 0; i < numReaders; i++ {
|
||||
wg.Add(1)
|
||||
go func(readerID int) {
|
||||
defer wg.Done()
|
||||
<-barrier
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
readerResults[readerID].panicked = true
|
||||
}
|
||||
}()
|
||||
|
||||
readCount := 0
|
||||
for j := 0; j < operationsPerReader; j++ {
|
||||
// Create a test vertex and read
|
||||
testUnit := &testGraphVertex{Name: fmt.Sprintf("test-reader-%d-%d", readerID, j)}
|
||||
forwardEdges := graph.GetForwardAdjacentVertices(testUnit)
|
||||
reverseEdges := graph.GetReverseAdjacentVertices(testUnit)
|
||||
|
||||
// Just verify no panics (results may be nil for non-existent vertices)
|
||||
_ = forwardEdges
|
||||
_ = reverseEdges
|
||||
readCount++
|
||||
}
|
||||
readerResults[readerID].readCount = readCount
|
||||
}(i)
|
||||
}
|
||||
|
||||
close(barrier)
|
||||
wg.Wait()
|
||||
|
||||
// Verify no panics occurred in readers
|
||||
for i, result := range readerResults {
|
||||
require.False(t, result.panicked, "reader %d panicked", i)
|
||||
require.Equal(t, operationsPerReader, result.readCount, "reader %d should have performed expected reads", i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("ConcurrentCycleDetection", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
graph := &testGraph{}
|
||||
|
||||
// Pre-create chain: A→B→C→D
|
||||
unitA := &testGraphVertex{Name: "A"}
|
||||
unitB := &testGraphVertex{Name: "B"}
|
||||
unitC := &testGraphVertex{Name: "C"}
|
||||
unitD := &testGraphVertex{Name: "D"}
|
||||
|
||||
err := graph.AddEdge(unitA, unitB, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
err = graph.AddEdge(unitB, unitC, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
err = graph.AddEdge(unitC, unitD, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
|
||||
barrier := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 50
|
||||
cycleErrors := make([]error, numGoroutines)
|
||||
|
||||
// Launch goroutines trying to add D→A (creates cycle)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(goroutineID int) {
|
||||
defer wg.Done()
|
||||
<-barrier
|
||||
err := graph.AddEdge(unitD, unitA, testEdgeCompleted)
|
||||
cycleErrors[goroutineID] = err
|
||||
}(i)
|
||||
}
|
||||
|
||||
close(barrier)
|
||||
wg.Wait()
|
||||
|
||||
// Verify all attempts correctly returned cycle error
|
||||
for i, err := range cycleErrors {
|
||||
require.Error(t, err, "goroutine %d should have detected cycle", i)
|
||||
require.Contains(t, err.Error(), "would create a cycle")
|
||||
}
|
||||
|
||||
// Verify graph remains valid (original chain intact)
|
||||
dot, err := graph.ToDOT("test")
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, dot)
|
||||
})
|
||||
|
||||
t.Run("ConcurrentToDOT", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
graph := &testGraph{}
|
||||
|
||||
// Pre-populate graph
|
||||
for i := 0; i < 20; i++ {
|
||||
from := &testGraphVertex{Name: fmt.Sprintf("dot-unit-%d", i)}
|
||||
to := &testGraphVertex{Name: fmt.Sprintf("dot-unit-%d", i+1)}
|
||||
err := graph.AddEdge(from, to, testEdgeCompleted)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
barrier := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
const numReaders = 100
|
||||
const numWriters = 20
|
||||
dotResults := make([]string, numReaders)
|
||||
|
||||
// Launch readers calling ToDOT
|
||||
dotErrors := make([]error, numReaders)
|
||||
for i := 0; i < numReaders; i++ {
|
||||
wg.Add(1)
|
||||
go func(readerID int) {
|
||||
defer wg.Done()
|
||||
<-barrier
|
||||
dot, err := graph.ToDOT(fmt.Sprintf("test-%d", readerID))
|
||||
dotErrors[readerID] = err
|
||||
if err == nil {
|
||||
dotResults[readerID] = dot
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Launch writers adding edges
|
||||
for i := 0; i < numWriters; i++ {
|
||||
wg.Add(1)
|
||||
go func(writerID int) {
|
||||
defer wg.Done()
|
||||
<-barrier
|
||||
from := &testGraphVertex{Name: fmt.Sprintf("writer-dot-%d", writerID)}
|
||||
to := &testGraphVertex{Name: fmt.Sprintf("writer-dot-target-%d", writerID)}
|
||||
graph.AddEdge(from, to, testEdgeCompleted)
|
||||
}(i)
|
||||
}
|
||||
|
||||
close(barrier)
|
||||
wg.Wait()
|
||||
|
||||
// Verify no errors occurred during DOT generation
|
||||
for i, err := range dotErrors {
|
||||
require.NoError(t, err, "DOT generation error at index %d", i)
|
||||
}
|
||||
|
||||
// Verify all DOT results are valid
|
||||
for i, dot := range dotResults {
|
||||
require.NotEmpty(t, dot, "DOT result %d should not be empty", i)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkGraph_ConcurrentMixedOperations(b *testing.B) {
|
||||
graph := &testGraph{}
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 200
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Launch goroutines performing random operations
|
||||
for j := 0; j < numGoroutines; j++ {
|
||||
wg.Add(1)
|
||||
go func(goroutineID int) {
|
||||
defer wg.Done()
|
||||
operationCount := 0
|
||||
|
||||
for operationCount < 50 {
|
||||
operation := float32(randInt(100)) / 100.0
|
||||
|
||||
if operation < 0.6 { // 60% reads
|
||||
// Read operation
|
||||
testUnit := &testGraphVertex{Name: fmt.Sprintf("bench-read-%d-%d", goroutineID, operationCount)}
|
||||
forwardEdges := graph.GetForwardAdjacentVertices(testUnit)
|
||||
reverseEdges := graph.GetReverseAdjacentVertices(testUnit)
|
||||
|
||||
// Just verify no panics (results may be nil for non-existent vertices)
|
||||
_ = forwardEdges
|
||||
_ = reverseEdges
|
||||
} else { // 40% writes
|
||||
// Write operation
|
||||
from := &testGraphVertex{Name: fmt.Sprintf("bench-write-%d-%d", goroutineID, operationCount)}
|
||||
to := &testGraphVertex{Name: fmt.Sprintf("bench-write-target-%d-%d", goroutineID, operationCount)}
|
||||
graph.AddEdge(from, to, testEdgeCompleted)
|
||||
}
|
||||
|
||||
operationCount++
|
||||
}
|
||||
}(j)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
Vendored
+8
@@ -0,0 +1,8 @@
|
||||
strict digraph Cycle {
|
||||
// Node definitions.
|
||||
1;
|
||||
2;
|
||||
|
||||
// Edge definitions.
|
||||
1 -> 2;
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
strict digraph ForwardAndReverseEdges {
|
||||
// Node definitions.
|
||||
1;
|
||||
2;
|
||||
3;
|
||||
|
||||
// Edge definitions.
|
||||
1 -> 2;
|
||||
1 -> 3;
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
strict digraph MultipleDependenciesSameStatus {
|
||||
// Node definitions.
|
||||
1;
|
||||
2;
|
||||
3;
|
||||
4;
|
||||
|
||||
// Edge definitions.
|
||||
1 -> 2;
|
||||
1 -> 3;
|
||||
1 -> 4;
|
||||
}
|
||||
+4
@@ -0,0 +1,4 @@
|
||||
strict digraph SelfReference {
|
||||
// Node definitions.
|
||||
1;
|
||||
}
|
||||
@@ -0,0 +1,78 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
var (
|
||||
_ pflag.SliceValue = &AllowListFlag{}
|
||||
_ pflag.Value = &AllowListFlag{}
|
||||
)
|
||||
|
||||
// AllowListFlag implements pflag.SliceValue for codersdk.APIAllowListTarget entries.
|
||||
type AllowListFlag []codersdk.APIAllowListTarget
|
||||
|
||||
func AllowListFlagOf(al *[]codersdk.APIAllowListTarget) *AllowListFlag {
|
||||
return (*AllowListFlag)(al)
|
||||
}
|
||||
|
||||
func (a AllowListFlag) String() string {
|
||||
return strings.Join(a.GetSlice(), ",")
|
||||
}
|
||||
|
||||
func (a AllowListFlag) Value() []codersdk.APIAllowListTarget {
|
||||
return []codersdk.APIAllowListTarget(a)
|
||||
}
|
||||
|
||||
// Type identifies this flag type in pflag usage/help output.
func (AllowListFlag) Type() string {
	return "allow-list"
}
|
||||
|
||||
func (a *AllowListFlag) Set(set string) error {
|
||||
values, err := csv.NewReader(strings.NewReader(set)).Read()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse allow list entries as csv: %w", err)
|
||||
}
|
||||
for _, v := range values {
|
||||
if err := a.Append(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) Append(value string) error {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return xerrors.New("allow list entry cannot be empty")
|
||||
}
|
||||
var target codersdk.APIAllowListTarget
|
||||
if err := target.UnmarshalText([]byte(value)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*a = append(*a, target)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) Replace(items []string) error {
|
||||
*a = []codersdk.APIAllowListTarget{}
|
||||
for _, item := range items {
|
||||
if err := a.Append(item); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) GetSlice() []string {
|
||||
out := make([]string, len(*a))
|
||||
for i, entry := range *a {
|
||||
out[i] = entry.String()
|
||||
}
|
||||
return out
|
||||
}
|
||||
+18
-9
@@ -296,22 +296,23 @@ func renderTable(out any, sort string, headers table.Row, filterColumns []string
|
||||
// returned. If the table tag is malformed, an error is returned.
|
||||
//
|
||||
// The returned name is transformed from "snake_case" to "normal text".
|
||||
func parseTableStructTag(field reflect.StructField) (name string, defaultSort, noSortOpt, recursive, skipParentName bool, err error) {
|
||||
func parseTableStructTag(field reflect.StructField) (name string, defaultSort, noSortOpt, recursive, skipParentName, emptyNil bool, err error) {
|
||||
tags, err := structtag.Parse(string(field.Tag))
|
||||
if err != nil {
|
||||
return "", false, false, false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err)
|
||||
return "", false, false, false, false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err)
|
||||
}
|
||||
|
||||
tag, err := tags.Get("table")
|
||||
if err != nil || tag.Name == "-" {
|
||||
// tags.Get only returns an error if the tag is not found.
|
||||
return "", false, false, false, false, nil
|
||||
return "", false, false, false, false, false, nil
|
||||
}
|
||||
|
||||
defaultSortOpt := false
|
||||
noSortOpt = false
|
||||
recursiveOpt := false
|
||||
skipParentNameOpt := false
|
||||
emptyNilOpt := false
|
||||
for _, opt := range tag.Options {
|
||||
switch opt {
|
||||
case "default_sort":
|
||||
@@ -326,12 +327,14 @@ func parseTableStructTag(field reflect.StructField) (name string, defaultSort, n
|
||||
// make sure the child name is unique across all nested structs in the parent.
|
||||
recursiveOpt = true
|
||||
skipParentNameOpt = true
|
||||
case "empty_nil":
|
||||
emptyNilOpt = true
|
||||
default:
|
||||
return "", false, false, false, false, xerrors.Errorf("unknown option %q in struct field tag", opt)
|
||||
return "", false, false, false, false, false, xerrors.Errorf("unknown option %q in struct field tag", opt)
|
||||
}
|
||||
}
|
||||
|
||||
return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, noSortOpt, recursiveOpt, skipParentNameOpt, nil
|
||||
return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, noSortOpt, recursiveOpt, skipParentNameOpt, emptyNilOpt, nil
|
||||
}
|
||||
|
||||
func isStructOrStructPointer(t reflect.Type) bool {
|
||||
@@ -358,7 +361,7 @@ func typeToTableHeaders(t reflect.Type, requireDefault bool) ([]string, string,
|
||||
noSortOpt := false
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
name, defaultSort, noSort, recursive, skip, err := parseTableStructTag(field)
|
||||
name, defaultSort, noSort, recursive, skip, _, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, "", xerrors.Errorf("parse struct tags for field %q in type %q: %w", field.Name, t.String(), err)
|
||||
}
|
||||
@@ -435,7 +438,7 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
field := val.Type().Field(i)
|
||||
fieldVal := val.Field(i)
|
||||
name, _, _, recursive, skip, err := parseTableStructTag(field)
|
||||
name, _, _, recursive, skip, emptyNil, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse struct tags for field %q in type %T: %w", field.Name, val, err)
|
||||
}
|
||||
@@ -443,8 +446,14 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Recurse if it's a struct.
|
||||
fieldType := field.Type
|
||||
|
||||
// If empty_nil is set and this is a nil pointer, use a zero value.
|
||||
if emptyNil && fieldVal.Kind() == reflect.Pointer && fieldVal.IsNil() {
|
||||
fieldVal = reflect.New(fieldType.Elem())
|
||||
}
|
||||
|
||||
// Recurse if it's a struct.
|
||||
if recursive {
|
||||
if !isStructOrStructPointer(fieldType) {
|
||||
return nil, xerrors.Errorf("field %q in type %q is marked as recursive but does not contain a struct or a pointer to a struct", field.Name, fieldType.String())
|
||||
@@ -467,7 +476,7 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
}
|
||||
|
||||
// Otherwise, we just use the field value.
|
||||
row[name] = val.Field(i).Interface()
|
||||
row[name] = fieldVal.Interface()
|
||||
}
|
||||
|
||||
return row, nil
|
||||
|
||||
@@ -400,6 +400,78 @@ foo <nil> 10 [a, b, c] foo1 11 foo2 12 fo
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("EmptyNil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type emptyNilTest struct {
|
||||
Name string `table:"name,default_sort"`
|
||||
EmptyOnNil *string `table:"empty_on_nil,empty_nil"`
|
||||
NormalBehavior *string `table:"normal_behavior"`
|
||||
}
|
||||
|
||||
value := "value"
|
||||
in := []emptyNilTest{
|
||||
{
|
||||
Name: "has_value",
|
||||
EmptyOnNil: &value,
|
||||
NormalBehavior: &value,
|
||||
},
|
||||
{
|
||||
Name: "has_nil",
|
||||
EmptyOnNil: nil,
|
||||
NormalBehavior: nil,
|
||||
},
|
||||
}
|
||||
|
||||
expected := `
|
||||
NAME EMPTY ON NIL NORMAL BEHAVIOR
|
||||
has_nil <nil>
|
||||
has_value value value
|
||||
`
|
||||
|
||||
out, err := cliui.DisplayTable(in, "", nil)
|
||||
log.Println("rendered table:\n" + out)
|
||||
require.NoError(t, err)
|
||||
compareTables(t, expected, out)
|
||||
})
|
||||
|
||||
t.Run("EmptyNilWithRecursiveInline", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type nestedData struct {
|
||||
Name string `table:"name"`
|
||||
}
|
||||
|
||||
type inlineTest struct {
|
||||
Nested *nestedData `table:"ignored,recursive_inline,empty_nil"`
|
||||
Count int `table:"count,default_sort"`
|
||||
}
|
||||
|
||||
in := []inlineTest{
|
||||
{
|
||||
Nested: &nestedData{
|
||||
Name: "alice",
|
||||
},
|
||||
Count: 1,
|
||||
},
|
||||
{
|
||||
Nested: nil,
|
||||
Count: 2,
|
||||
},
|
||||
}
|
||||
|
||||
expected := `
|
||||
NAME COUNT
|
||||
alice 1
|
||||
2
|
||||
`
|
||||
|
||||
out, err := cliui.DisplayTable(in, "", nil)
|
||||
log.Println("rendered table:\n" + out)
|
||||
require.NoError(t, err)
|
||||
compareTables(t, expected, out)
|
||||
})
|
||||
}
|
||||
|
||||
// compareTables normalizes the incoming table lines
|
||||
|
||||
@@ -185,9 +185,6 @@ func TestDelete(t *testing.T) {
|
||||
|
||||
t.Run("WarnNoProvisioners", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
store, ps, db := dbtestutil.NewDBWithSQLDB(t)
|
||||
client, closeDaemon := coderdtest.NewWithProvisionerCloser(t, &coderdtest.Options{
|
||||
@@ -228,9 +225,6 @@ func TestDelete(t *testing.T) {
|
||||
|
||||
t.Run("Prebuilt workspace delete permissions", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
// Setup
|
||||
db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
|
||||
+98
-383
@@ -29,7 +29,6 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
notificationsLib "github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/tracing"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
@@ -40,7 +39,6 @@ import (
|
||||
"github.com/coder/coder/v2/scaletest/dashboard"
|
||||
"github.com/coder/coder/v2/scaletest/harness"
|
||||
"github.com/coder/coder/v2/scaletest/loadtestutil"
|
||||
"github.com/coder/coder/v2/scaletest/notifications"
|
||||
"github.com/coder/coder/v2/scaletest/reconnectingpty"
|
||||
"github.com/coder/coder/v2/scaletest/workspacebuild"
|
||||
"github.com/coder/coder/v2/scaletest/workspacetraffic"
|
||||
@@ -66,6 +64,7 @@ func (r *RootCmd) scaletestCmd() *serpent.Command {
|
||||
r.scaletestWorkspaceTraffic(),
|
||||
r.scaletestAutostart(),
|
||||
r.scaletestNotifications(),
|
||||
r.scaletestSMTP(),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -385,6 +384,88 @@ func (s *scaletestPrometheusFlags) attach(opts *serpent.OptionSet) {
|
||||
)
|
||||
}
|
||||
|
||||
// workspaceTargetFlags holds common flags for targeting specific workspaces in scale tests.
|
||||
type workspaceTargetFlags struct {
|
||||
template string
|
||||
targetWorkspaces string
|
||||
useHostLogin bool
|
||||
}
|
||||
|
||||
// attach adds the workspace target flags to the given options set.
|
||||
func (f *workspaceTargetFlags) attach(opts *serpent.OptionSet) {
|
||||
*opts = append(*opts,
|
||||
serpent.Option{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&f.template),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&f.targetWorkspaces),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&f.useHostLogin),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// getTargetedWorkspaces retrieves the workspaces based on the template filter and target range. warnWriter is where to
|
||||
// write a warning message if any workspaces were skipped due to ownership mismatch.
|
||||
func (f *workspaceTargetFlags) getTargetedWorkspaces(ctx context.Context, client *codersdk.Client, organizationIDs []uuid.UUID, warnWriter io.Writer) ([]codersdk.Workspace, error) {
|
||||
// Validate template if provided
|
||||
if f.template != "" {
|
||||
_, err := parseTemplate(ctx, client, organizationIDs, f.template)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse target range
|
||||
targetStart, targetEnd, err := parseTargetRange("workspaces", f.targetWorkspaces)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse target workspaces: %w", err)
|
||||
}
|
||||
|
||||
// Determine owner based on useHostLogin
|
||||
var owner string
|
||||
if f.useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
// Get workspaces
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(ctx, client, owner, f.template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(warnWriter, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
// Adjust targetEnd if not specified
|
||||
if targetEnd == 0 {
|
||||
targetEnd = len(workspaces)
|
||||
}
|
||||
|
||||
// Validate range
|
||||
if len(workspaces) == 0 {
|
||||
return nil, xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetEnd > len(workspaces) {
|
||||
return nil, xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetEnd, len(workspaces))
|
||||
}
|
||||
|
||||
// Return the sliced workspaces
|
||||
return workspaces[targetStart:targetEnd], nil
|
||||
}
|
||||
|
||||
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
|
||||
me, err := client.User(ctx, codersdk.Me)
|
||||
if err != nil {
|
||||
@@ -1194,12 +1275,10 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
disableDirect bool
|
||||
useHostLogin bool
|
||||
app string
|
||||
template string
|
||||
targetWorkspaces string
|
||||
workspaceProxyURL string
|
||||
|
||||
targetFlags = &workspaceTargetFlags{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = newScaletestCleanupStrategy()
|
||||
@@ -1244,15 +1323,9 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
if template != "" {
|
||||
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
targetWorkspaceStart, targetWorkspaceEnd, err := parseTargetRange("workspaces", targetWorkspaces)
|
||||
workspaces, err := targetFlags.getTargetedWorkspaces(ctx, client, me.OrganizationIDs, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse target workspaces: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
appHost, err := client.AppHost(ctx)
|
||||
@@ -1260,30 +1333,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
return xerrors.Errorf("get app host: %w", err)
|
||||
}
|
||||
|
||||
var owner string
|
||||
if useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(inv.Context(), client, owner, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(inv.Stdout, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
if targetWorkspaceEnd == 0 {
|
||||
targetWorkspaceEnd = len(workspaces)
|
||||
}
|
||||
|
||||
if len(workspaces) == 0 {
|
||||
return xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetWorkspaceEnd > len(workspaces) {
|
||||
return xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetWorkspaceEnd, len(workspaces))
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
@@ -1308,10 +1357,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
|
||||
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
|
||||
for idx, ws := range workspaces {
|
||||
if idx < targetWorkspaceStart || idx >= targetWorkspaceEnd {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
agent codersdk.WorkspaceAgent
|
||||
name = "workspace-traffic"
|
||||
@@ -1416,19 +1461,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
}
|
||||
|
||||
cmd.Options = []serpent.Option{
|
||||
{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&template),
|
||||
},
|
||||
{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&targetWorkspaces),
|
||||
},
|
||||
{
|
||||
Flag: "bytes-per-tick",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
|
||||
@@ -1464,13 +1496,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
Description: "Send WebSocket traffic to a workspace app (proxied via coderd), cannot be used with --ssh.",
|
||||
Value: serpent.StringOf(&app),
|
||||
},
|
||||
{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&useHostLogin),
|
||||
},
|
||||
{
|
||||
Flag: "workspace-proxy-url",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_PROXY_URL",
|
||||
@@ -1480,6 +1505,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
targetFlags.attach(&cmd.Options)
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
strategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
@@ -1921,259 +1947,6 @@ func (r *RootCmd) scaletestAutostart() *serpent.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
var (
|
||||
userCount int64
|
||||
ownerUserPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
|
||||
// This test requires unlimited concurrency.
|
||||
timeoutStrategy = &timeoutFlags{}
|
||||
cleanupStrategy = newScaletestCleanupStrategy()
|
||||
output = &scaletestOutputFlags{}
|
||||
prometheusFlags = &scaletestPrometheusFlags{}
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "notifications",
|
||||
Short: "Simulate notification delivery by creating many users listening to notifications.",
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notifyCtx, stop := signal.NotifyContext(ctx, StopSignals...)
|
||||
defer stop()
|
||||
ctx = notifyCtx
|
||||
|
||||
me, err := requireAdmin(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if userCount <= 0 {
|
||||
return xerrors.Errorf("--user-count must be greater than 0")
|
||||
}
|
||||
|
||||
if ownerUserPercentage < 0 || ownerUserPercentage > 100 {
|
||||
return xerrors.Errorf("--owner-user-percentage must be between 0 and 100")
|
||||
}
|
||||
|
||||
ownerUserCount := int64(float64(userCount) * ownerUserPercentage / 100)
|
||||
if ownerUserCount == 0 && ownerUserPercentage > 0 {
|
||||
ownerUserCount = 1
|
||||
}
|
||||
regularUserCount := userCount - ownerUserCount
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Distribution plan:\n")
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Total users: %d\n", userCount)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Owner users: %d (%.1f%%)\n", ownerUserCount, ownerUserPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-ownerUserPercentage)
|
||||
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse --output flags")
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
}
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := notifications.NewMetrics(reg)
|
||||
|
||||
logger := inv.Logger
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
|
||||
defer func() {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
if err := closeTracing(ctx); err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
// Wait for prometheus metrics to be scraped
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
|
||||
<-time.After(prometheusFlags.Wait)
|
||||
}()
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Creating users...")
|
||||
|
||||
dialBarrier := &sync.WaitGroup{}
|
||||
ownerWatchBarrier := &sync.WaitGroup{}
|
||||
dialBarrier.Add(int(userCount))
|
||||
ownerWatchBarrier.Add(int(ownerUserCount))
|
||||
|
||||
expectedNotifications := map[uuid.UUID]chan time.Time{
|
||||
notificationsLib.TemplateUserAccountCreated: make(chan time.Time, 1),
|
||||
notificationsLib.TemplateUserAccountDeleted: make(chan time.Time, 1),
|
||||
}
|
||||
|
||||
configs := make([]notifications.Config, 0, userCount)
|
||||
for range ownerUserCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{codersdk.RoleOwner},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
ExpectedNotifications: expectedNotifications,
|
||||
Metrics: metrics,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
}
|
||||
configs = append(configs, config)
|
||||
}
|
||||
for range regularUserCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
Metrics: metrics,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
}
|
||||
configs = append(configs, config)
|
||||
}
|
||||
|
||||
go triggerUserNotifications(
|
||||
ctx,
|
||||
logger,
|
||||
client,
|
||||
me.OrganizationIDs[0],
|
||||
dialBarrier,
|
||||
dialTimeout,
|
||||
expectedNotifications,
|
||||
)
|
||||
|
||||
th := harness.NewTestHarness(timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}), cleanupStrategy.toStrategy())
|
||||
|
||||
for i, config := range configs {
|
||||
id := strconv.Itoa(i)
|
||||
name := fmt.Sprintf("notifications-%s", id)
|
||||
var runner harness.Runnable = notifications.NewRunner(client, config)
|
||||
if tracingEnabled {
|
||||
runner = &runnableTraceWrapper{
|
||||
tracer: tracer,
|
||||
spanName: name,
|
||||
runner: runner,
|
||||
}
|
||||
}
|
||||
|
||||
th.AddRun(name, id, runner)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Running notification delivery scaletest...")
|
||||
testCtx, testCancel := timeoutStrategy.toContext(ctx)
|
||||
defer testCancel()
|
||||
err = th.Run(testCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
|
||||
}
|
||||
|
||||
// If the command was interrupted, skip stats.
|
||||
if notifyCtx.Err() != nil {
|
||||
return notifyCtx.Err()
|
||||
}
|
||||
|
||||
res := th.Results()
|
||||
for _, o := range outputs {
|
||||
err = o.write(res, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !noCleanup {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nCleaning up...")
|
||||
cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
|
||||
defer cleanupCancel()
|
||||
err = th.Cleanup(cleanupCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("cleanup tests: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if res.TotalFail > 0 {
|
||||
return xerrors.New("load test failed, see above for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = serpent.OptionSet{
|
||||
{
|
||||
Flag: "user-count",
|
||||
FlagShorthand: "c",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_USER_COUNT",
|
||||
Description: "Required: Total number of users to create.",
|
||||
Value: serpent.Int64Of(&userCount),
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Flag: "owner-user-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_OWNER_USER_PERCENTAGE",
|
||||
Default: "20.0",
|
||||
Description: "Percentage of users to assign Owner role to (0-100).",
|
||||
Value: serpent.Float64Of(&ownerUserPercentage),
|
||||
},
|
||||
{
|
||||
Flag: "notification-timeout",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TIMEOUT",
|
||||
Default: "5m",
|
||||
Description: "How long to wait for notifications after triggering.",
|
||||
Value: serpent.DurationOf(¬ificationTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "dial-timeout",
|
||||
Env: "CODER_SCALETEST_DIAL_TIMEOUT",
|
||||
Default: "2m",
|
||||
Description: "Timeout for dialing the notification websocket endpoint.",
|
||||
Value: serpent.DurationOf(&dialTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "no-cleanup",
|
||||
Env: "CODER_SCALETEST_NO_CLEANUP",
|
||||
Description: "Do not clean up resources after the test completes.",
|
||||
Value: serpent.BoolOf(&noCleanup),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
timeoutStrategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
output.attach(&cmd.Options)
|
||||
prometheusFlags.attach(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
|
||||
type runnableTraceWrapper struct {
|
||||
tracer trace.Tracer
|
||||
spanName string
|
||||
@@ -2183,8 +1956,9 @@ type runnableTraceWrapper struct {
|
||||
}
|
||||
|
||||
var (
|
||||
_ harness.Runnable = &runnableTraceWrapper{}
|
||||
_ harness.Cleanable = &runnableTraceWrapper{}
|
||||
_ harness.Runnable = &runnableTraceWrapper{}
|
||||
_ harness.Cleanable = &runnableTraceWrapper{}
|
||||
_ harness.Collectable = &runnableTraceWrapper{}
|
||||
)
|
||||
|
||||
func (r *runnableTraceWrapper) Run(ctx context.Context, id string, logs io.Writer) error {
|
||||
@@ -2226,6 +2000,14 @@ func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string, logs io.W
|
||||
return c.Cleanup(ctx, id, logs)
|
||||
}
|
||||
|
||||
func (r *runnableTraceWrapper) GetMetrics() map[string]any {
|
||||
c, ok := r.runner.(harness.Collectable)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return c.GetMetrics()
|
||||
}
|
||||
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client, owner, template string) ([]codersdk.Workspace, int, error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
@@ -2374,73 +2156,6 @@ func parseTargetRange(name, targets string) (start, end int, err error) {
|
||||
return start, end, nil
|
||||
}
|
||||
|
||||
// triggerUserNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test user to trigger notification events for testing.
|
||||
func triggerUserNotifications(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
client *codersdk.Client,
|
||||
orgID uuid.UUID,
|
||||
dialBarrier *sync.WaitGroup,
|
||||
dialTimeout time.Duration,
|
||||
expectedNotifications map[uuid.UUID]chan time.Time,
|
||||
) {
|
||||
logger.Info(ctx, "waiting for all users to connect")
|
||||
|
||||
// Wait for all users to connect
|
||||
waitCtx, cancel := context.WithTimeout(ctx, dialTimeout+30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
dialBarrier.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
logger.Info(ctx, "all users connected")
|
||||
case <-waitCtx.Done():
|
||||
if waitCtx.Err() == context.DeadlineExceeded {
|
||||
logger.Error(ctx, "timeout waiting for users to connect")
|
||||
} else {
|
||||
logger.Info(ctx, "context canceled while waiting for users")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
triggerUsername = "scaletest-trigger-user"
|
||||
triggerEmail = "scaletest-trigger@example.com"
|
||||
)
|
||||
|
||||
logger.Info(ctx, "creating test user to test notifications",
|
||||
slog.F("username", triggerUsername),
|
||||
slog.F("email", triggerEmail),
|
||||
slog.F("org_id", orgID))
|
||||
|
||||
testUser, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
OrganizationIDs: []uuid.UUID{orgID},
|
||||
Username: triggerUsername,
|
||||
Email: triggerEmail,
|
||||
Password: "test-password-123",
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test user", slog.Error(err))
|
||||
return
|
||||
}
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountCreated] <- time.Now()
|
||||
|
||||
err = client.DeleteUser(ctx, testUser.ID)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "delete test user", slog.Error(err))
|
||||
return
|
||||
}
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountCreated])
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountDeleted])
|
||||
}
|
||||
|
||||
func createWorkspaceAppConfig(client *codersdk.Client, appHost, app string, workspace codersdk.Workspace, agent codersdk.WorkspaceAgent) (workspacetraffic.AppConfig, error) {
|
||||
if app == "" {
|
||||
return workspacetraffic.AppConfig{}, nil
|
||||
|
||||
@@ -27,6 +27,7 @@ const (
|
||||
func (r *RootCmd) scaletestDynamicParameters() *serpent.Command {
|
||||
var (
|
||||
templateName string
|
||||
provisionerTags []string
|
||||
numEvals int64
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
prometheusFlags = &scaletestPrometheusFlags{}
|
||||
@@ -56,6 +57,11 @@ func (r *RootCmd) scaletestDynamicParameters() *serpent.Command {
|
||||
return xerrors.Errorf("template cannot be empty")
|
||||
}
|
||||
|
||||
tags, err := ParseProvisionerTags(provisionerTags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
org, err := orgContext.Selected(inv, client)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -99,7 +105,7 @@ func (r *RootCmd) scaletestDynamicParameters() *serpent.Command {
|
||||
}()
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
|
||||
partitions, err := dynamicparameters.SetupPartitions(ctx, client, org.ID, templateName, numEvals, logger)
|
||||
partitions, err := dynamicparameters.SetupPartitions(ctx, client, org.ID, templateName, tags, numEvals, logger)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setup dynamic parameters partitions: %w", err)
|
||||
}
|
||||
@@ -160,6 +166,11 @@ func (r *RootCmd) scaletestDynamicParameters() *serpent.Command {
|
||||
Default: "100",
|
||||
Value: serpent.Int64Of(&numEvals),
|
||||
},
|
||||
{
|
||||
Flag: "provisioner-tag",
|
||||
Description: "Specify a set of tags to target provisioner daemons.",
|
||||
Value: serpent.StringArrayOf(&provisionerTags),
|
||||
},
|
||||
}
|
||||
orgContext.AttachOptions(cmd)
|
||||
output.attach(&cmd.Options)
|
||||
|
||||
@@ -0,0 +1,470 @@
|
||||
//go:build !slim
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
notificationsLib "github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/scaletest/createusers"
|
||||
"github.com/coder/coder/v2/scaletest/harness"
|
||||
"github.com/coder/coder/v2/scaletest/notifications"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
var (
|
||||
userCount int64
|
||||
templateAdminPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
smtpRequestTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
smtpAPIURL string
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
|
||||
// This test requires unlimited concurrency.
|
||||
timeoutStrategy = &timeoutFlags{}
|
||||
cleanupStrategy = newScaletestCleanupStrategy()
|
||||
output = &scaletestOutputFlags{}
|
||||
prometheusFlags = &scaletestPrometheusFlags{}
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "notifications",
|
||||
Short: "Simulate notification delivery by creating many users listening to notifications.",
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notifyCtx, stop := signal.NotifyContext(ctx, StopSignals...)
|
||||
defer stop()
|
||||
ctx = notifyCtx
|
||||
|
||||
me, err := requireAdmin(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if userCount <= 0 {
|
||||
return xerrors.Errorf("--user-count must be greater than 0")
|
||||
}
|
||||
|
||||
if templateAdminPercentage < 0 || templateAdminPercentage > 100 {
|
||||
return xerrors.Errorf("--template-admin-percentage must be between 0 and 100")
|
||||
}
|
||||
|
||||
if smtpAPIURL != "" && !strings.HasPrefix(smtpAPIURL, "http://") && !strings.HasPrefix(smtpAPIURL, "https://") {
|
||||
return xerrors.Errorf("--smtp-api-url must start with http:// or https://")
|
||||
}
|
||||
|
||||
templateAdminCount := int64(float64(userCount) * templateAdminPercentage / 100)
|
||||
if templateAdminCount == 0 && templateAdminPercentage > 0 {
|
||||
templateAdminCount = 1
|
||||
}
|
||||
regularUserCount := userCount - templateAdminCount
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Distribution plan:\n")
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Total users: %d\n", userCount)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Template admins: %d (%.1f%%)\n", templateAdminCount, templateAdminPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-templateAdminPercentage)
|
||||
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse --output flags")
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
}
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := notifications.NewMetrics(reg)
|
||||
|
||||
logger := inv.Logger
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
|
||||
defer func() {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
if err := closeTracing(ctx); err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
// Wait for prometheus metrics to be scraped
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
|
||||
<-time.After(prometheusFlags.Wait)
|
||||
}()
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Creating users...")
|
||||
|
||||
dialBarrier := &sync.WaitGroup{}
|
||||
templateAdminWatchBarrier := &sync.WaitGroup{}
|
||||
dialBarrier.Add(int(userCount))
|
||||
templateAdminWatchBarrier.Add(int(templateAdminCount))
|
||||
|
||||
expectedNotificationIDs := map[uuid.UUID]struct{}{
|
||||
notificationsLib.TemplateTemplateDeleted: {},
|
||||
}
|
||||
|
||||
triggerTimes := make(map[uuid.UUID]chan time.Time, len(expectedNotificationIDs))
|
||||
for id := range expectedNotificationIDs {
|
||||
triggerTimes[id] = make(chan time.Time, 1)
|
||||
}
|
||||
|
||||
configs := make([]notifications.Config, 0, userCount)
|
||||
for range templateAdminCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{codersdk.RoleTemplateAdmin},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
ExpectedNotificationsIDs: expectedNotificationIDs,
|
||||
Metrics: metrics,
|
||||
SMTPApiURL: smtpAPIURL,
|
||||
SMTPRequestTimeout: smtpRequestTimeout,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
}
|
||||
configs = append(configs, config)
|
||||
}
|
||||
for range regularUserCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
Metrics: metrics,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
}
|
||||
configs = append(configs, config)
|
||||
}
|
||||
|
||||
go triggerNotifications(
|
||||
ctx,
|
||||
logger,
|
||||
client,
|
||||
me.OrganizationIDs[0],
|
||||
dialBarrier,
|
||||
dialTimeout,
|
||||
triggerTimes,
|
||||
)
|
||||
|
||||
th := harness.NewTestHarness(timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}), cleanupStrategy.toStrategy())
|
||||
|
||||
for i, config := range configs {
|
||||
id := strconv.Itoa(i)
|
||||
name := fmt.Sprintf("notifications-%s", id)
|
||||
var runner harness.Runnable = notifications.NewRunner(client, config)
|
||||
if tracingEnabled {
|
||||
runner = &runnableTraceWrapper{
|
||||
tracer: tracer,
|
||||
spanName: name,
|
||||
runner: runner,
|
||||
}
|
||||
}
|
||||
|
||||
th.AddRun(name, id, runner)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Running notification delivery scaletest...")
|
||||
testCtx, testCancel := timeoutStrategy.toContext(ctx)
|
||||
defer testCancel()
|
||||
err = th.Run(testCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
|
||||
}
|
||||
|
||||
// If the command was interrupted, skip stats.
|
||||
if notifyCtx.Err() != nil {
|
||||
return notifyCtx.Err()
|
||||
}
|
||||
|
||||
res := th.Results()
|
||||
|
||||
if err := computeNotificationLatencies(ctx, logger, triggerTimes, res, metrics); err != nil {
|
||||
return xerrors.Errorf("compute notification latencies: %w", err)
|
||||
}
|
||||
|
||||
for _, o := range outputs {
|
||||
err = o.write(res, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !noCleanup {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nCleaning up...")
|
||||
cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
|
||||
defer cleanupCancel()
|
||||
err = th.Cleanup(cleanupCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("cleanup tests: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if res.TotalFail > 0 {
|
||||
return xerrors.New("load test failed, see above for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = serpent.OptionSet{
|
||||
{
|
||||
Flag: "user-count",
|
||||
FlagShorthand: "c",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_USER_COUNT",
|
||||
Description: "Required: Total number of users to create.",
|
||||
Value: serpent.Int64Of(&userCount),
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Flag: "template-admin-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TEMPLATE_ADMIN_PERCENTAGE",
|
||||
Default: "20.0",
|
||||
Description: "Percentage of users to assign Template Admin role to (0-100).",
|
||||
Value: serpent.Float64Of(&templateAdminPercentage),
|
||||
},
|
||||
{
|
||||
Flag: "notification-timeout",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TIMEOUT",
|
||||
Default: "10m",
|
||||
Description: "How long to wait for notifications after triggering.",
|
||||
Value: serpent.DurationOf(¬ificationTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "smtp-request-timeout",
|
||||
Env: "CODER_SCALETEST_SMTP_REQUEST_TIMEOUT",
|
||||
Default: "5m",
|
||||
Description: "Timeout for SMTP requests.",
|
||||
Value: serpent.DurationOf(&smtpRequestTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "dial-timeout",
|
||||
Env: "CODER_SCALETEST_DIAL_TIMEOUT",
|
||||
Default: "10m",
|
||||
Description: "Timeout for dialing the notification websocket endpoint.",
|
||||
Value: serpent.DurationOf(&dialTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "no-cleanup",
|
||||
Env: "CODER_SCALETEST_NO_CLEANUP",
|
||||
Description: "Do not clean up resources after the test completes.",
|
||||
Value: serpent.BoolOf(&noCleanup),
|
||||
},
|
||||
{
|
||||
Flag: "smtp-api-url",
|
||||
Env: "CODER_SCALETEST_SMTP_API_URL",
|
||||
Description: "SMTP mock HTTP API address.",
|
||||
Value: serpent.StringOf(&smtpAPIURL),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
timeoutStrategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
output.attach(&cmd.Options)
|
||||
prometheusFlags.attach(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func computeNotificationLatencies(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
expectedNotifications map[uuid.UUID]chan time.Time,
|
||||
results harness.Results,
|
||||
metrics *notifications.Metrics,
|
||||
) error {
|
||||
triggerTimes := make(map[uuid.UUID]time.Time)
|
||||
for notificationID, triggerTimeChan := range expectedNotifications {
|
||||
select {
|
||||
case triggerTime := <-triggerTimeChan:
|
||||
triggerTimes[notificationID] = triggerTime
|
||||
logger.Info(ctx, "received trigger time",
|
||||
slog.F("notification_id", notificationID),
|
||||
slog.F("trigger_time", triggerTime))
|
||||
default:
|
||||
logger.Warn(ctx, "no trigger time received for notification",
|
||||
slog.F("notification_id", notificationID))
|
||||
}
|
||||
}
|
||||
|
||||
if len(triggerTimes) == 0 {
|
||||
logger.Warn(ctx, "no trigger times available, skipping latency computation")
|
||||
return nil
|
||||
}
|
||||
|
||||
var totalLatencies int
|
||||
for runID, runResult := range results.Runs {
|
||||
if runResult.Error != nil {
|
||||
logger.Debug(ctx, "skipping failed run for latency computation",
|
||||
slog.F("run_id", runID))
|
||||
continue
|
||||
}
|
||||
|
||||
if runResult.Metrics == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process websocket notifications.
|
||||
if wsReceiptTimes, ok := runResult.Metrics[notifications.WebsocketNotificationReceiptTimeMetric].(map[uuid.UUID]time.Time); ok {
|
||||
for notificationID, receiptTime := range wsReceiptTimes {
|
||||
if triggerTime, ok := triggerTimes[notificationID]; ok {
|
||||
latency := receiptTime.Sub(triggerTime)
|
||||
metrics.RecordLatency(latency, notificationID.String(), notifications.NotificationTypeWebsocket)
|
||||
totalLatencies++
|
||||
logger.Debug(ctx, "computed websocket latency",
|
||||
slog.F("run_id", runID),
|
||||
slog.F("notification_id", notificationID),
|
||||
slog.F("latency", latency))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process SMTP notifications
|
||||
if smtpReceiptTimes, ok := runResult.Metrics[notifications.SMTPNotificationReceiptTimeMetric].(map[uuid.UUID]time.Time); ok {
|
||||
for notificationID, receiptTime := range smtpReceiptTimes {
|
||||
if triggerTime, ok := triggerTimes[notificationID]; ok {
|
||||
latency := receiptTime.Sub(triggerTime)
|
||||
metrics.RecordLatency(latency, notificationID.String(), notifications.NotificationTypeSMTP)
|
||||
totalLatencies++
|
||||
logger.Debug(ctx, "computed SMTP latency",
|
||||
slog.F("run_id", runID),
|
||||
slog.F("notification_id", notificationID),
|
||||
slog.F("latency", latency))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info(ctx, "finished computing notification latencies",
|
||||
slog.F("total_runs", results.TotalRuns),
|
||||
slog.F("total_latencies_computed", totalLatencies))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// triggerNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test template to trigger notification events for testing.
|
||||
func triggerNotifications(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
client *codersdk.Client,
|
||||
orgID uuid.UUID,
|
||||
dialBarrier *sync.WaitGroup,
|
||||
dialTimeout time.Duration,
|
||||
expectedNotifications map[uuid.UUID]chan time.Time,
|
||||
) {
|
||||
logger.Info(ctx, "waiting for all users to connect")
|
||||
|
||||
// Wait for all users to connect
|
||||
waitCtx, cancel := context.WithTimeout(ctx, dialTimeout+30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
dialBarrier.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
logger.Info(ctx, "all users connected")
|
||||
case <-waitCtx.Done():
|
||||
if waitCtx.Err() == context.DeadlineExceeded {
|
||||
logger.Error(ctx, "timeout waiting for users to connect")
|
||||
} else {
|
||||
logger.Info(ctx, "context canceled while waiting for users")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info(ctx, "creating test template to test notifications")
|
||||
|
||||
// Upload empty template file.
|
||||
file, err := client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader([]byte{}))
|
||||
if err != nil {
|
||||
logger.Error(ctx, "upload test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template uploaded", slog.F("file_id", file.ID))
|
||||
|
||||
// Create template version.
|
||||
version, err := client.CreateTemplateVersion(ctx, orgID, codersdk.CreateTemplateVersionRequest{
|
||||
StorageMethod: codersdk.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Provisioner: codersdk.ProvisionerTypeEcho,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test template version", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template version created", slog.F("template_version_id", version.ID))
|
||||
|
||||
// Create template.
|
||||
testTemplate, err := client.CreateTemplate(ctx, orgID, codersdk.CreateTemplateRequest{
|
||||
Name: "scaletest-test-template",
|
||||
Description: "scaletest-test-template",
|
||||
VersionID: version.ID,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template created", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Delete template to trigger notification.
|
||||
err = client.DeleteTemplate(ctx, testTemplate.ID)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "delete test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template deleted", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Record expected notification.
|
||||
expectedNotifications[notificationsLib.TemplateTemplateDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateTemplateDeleted])
|
||||
}
|
||||
@@ -0,0 +1,112 @@
|
||||
//go:build !slim
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"github.com/coder/coder/v2/scaletest/smtpmock"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (*RootCmd) scaletestSMTP() *serpent.Command {
|
||||
var (
|
||||
hostAddress string
|
||||
smtpPort int64
|
||||
apiPort int64
|
||||
purgeAtCount int64
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "smtp",
|
||||
Short: "Start a mock SMTP server for testing",
|
||||
Long: `Start a mock SMTP server with an HTTP API server that can be used to purge
|
||||
messages and get messages by email.`,
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
notifyCtx, stop := signal.NotifyContext(ctx, StopSignals...)
|
||||
defer stop()
|
||||
ctx = notifyCtx
|
||||
|
||||
logger := slog.Make(sloghuman.Sink(inv.Stderr)).Leveled(slog.LevelInfo)
|
||||
config := smtpmock.Config{
|
||||
HostAddress: hostAddress,
|
||||
SMTPPort: int(smtpPort),
|
||||
APIPort: int(apiPort),
|
||||
Logger: logger,
|
||||
}
|
||||
srv := new(smtpmock.Server)
|
||||
|
||||
if err := srv.Start(ctx, config); err != nil {
|
||||
return xerrors.Errorf("start mock SMTP server: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = srv.Stop()
|
||||
}()
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Mock SMTP server started on %s\n", srv.SMTPAddress())
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "HTTP API server started on %s\n", srv.APIAddress())
|
||||
if purgeAtCount > 0 {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, " Auto-purge when message count reaches %d\n", purgeAtCount)
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nTotal messages received since last purge: %d\n", srv.MessageCount())
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
count := srv.MessageCount()
|
||||
if count > 0 {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Messages received: %d\n", count)
|
||||
}
|
||||
|
||||
if purgeAtCount > 0 && int64(count) >= purgeAtCount {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Message count (%d) reached threshold (%d). Purging...\n", count, purgeAtCount)
|
||||
srv.Purge()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = []serpent.Option{
|
||||
{
|
||||
Flag: "host-address",
|
||||
Env: "CODER_SCALETEST_SMTP_HOST_ADDRESS",
|
||||
Default: "localhost",
|
||||
Description: "Host address to bind the mock SMTP and API servers.",
|
||||
Value: serpent.StringOf(&hostAddress),
|
||||
},
|
||||
{
|
||||
Flag: "smtp-port",
|
||||
Env: "CODER_SCALETEST_SMTP_PORT",
|
||||
Description: "Port for the mock SMTP server. Uses a random port if not specified.",
|
||||
Value: serpent.Int64Of(&smtpPort),
|
||||
},
|
||||
{
|
||||
Flag: "api-port",
|
||||
Env: "CODER_SCALETEST_SMTP_API_PORT",
|
||||
Description: "Port for the HTTP API server. Uses a random port if not specified.",
|
||||
Value: serpent.Int64Of(&apiPort),
|
||||
},
|
||||
{
|
||||
Flag: "purge-at-count",
|
||||
Env: "CODER_SCALETEST_SMTP_PURGE_AT_COUNT",
|
||||
Default: "100000",
|
||||
Description: "Maximum number of messages to keep before auto-purging. Set to 0 to disable.",
|
||||
Value: serpent.Int64Of(&purgeAtCount),
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
+10
-34
@@ -5,7 +5,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/pretty"
|
||||
@@ -47,43 +46,19 @@ func (r *RootCmd) taskDelete() *serpent.Command {
|
||||
}
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
|
||||
type toDelete struct {
|
||||
ID uuid.UUID
|
||||
Owner string
|
||||
Display string
|
||||
}
|
||||
|
||||
var items []toDelete
|
||||
var tasks []codersdk.Task
|
||||
for _, identifier := range inv.Args {
|
||||
identifier = strings.TrimSpace(identifier)
|
||||
if identifier == "" {
|
||||
return xerrors.New("task identifier cannot be empty or whitespace")
|
||||
}
|
||||
|
||||
// Check task identifier, try UUID first.
|
||||
if id, err := uuid.Parse(identifier); err == nil {
|
||||
task, err := exp.TaskByID(ctx, id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", identifier, err)
|
||||
}
|
||||
display := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
|
||||
items = append(items, toDelete{ID: id, Display: display, Owner: task.OwnerName})
|
||||
continue
|
||||
}
|
||||
|
||||
// Non-UUID, treat as a workspace identifier (name or owner/name).
|
||||
ws, err := namedWorkspace(ctx, client, identifier)
|
||||
task, err := exp.TaskByIdentifier(ctx, identifier)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", identifier, err)
|
||||
}
|
||||
display := ws.FullName()
|
||||
items = append(items, toDelete{ID: ws.ID, Display: display, Owner: ws.OwnerName})
|
||||
tasks = append(tasks, task)
|
||||
}
|
||||
|
||||
// Confirm deletion of the tasks.
|
||||
var displayList []string
|
||||
for _, it := range items {
|
||||
displayList = append(displayList, it.Display)
|
||||
for _, task := range tasks {
|
||||
displayList = append(displayList, fmt.Sprintf("%s/%s", task.OwnerName, task.Name))
|
||||
}
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Delete these tasks: %s?", pretty.Sprint(cliui.DefaultStyles.Code, strings.Join(displayList, ", "))),
|
||||
@@ -94,12 +69,13 @@ func (r *RootCmd) taskDelete() *serpent.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
if err := exp.DeleteTask(ctx, item.Owner, item.ID); err != nil {
|
||||
return xerrors.Errorf("delete task %q: %w", item.Display, err)
|
||||
for i, task := range tasks {
|
||||
display := displayList[i]
|
||||
if err := exp.DeleteTask(ctx, task.OwnerName, task.ID); err != nil {
|
||||
return xerrors.Errorf("delete task %q: %w", display, err)
|
||||
}
|
||||
_, _ = fmt.Fprintln(
|
||||
inv.Stdout, "Deleted task "+pretty.Sprint(cliui.DefaultStyles.Keyword, item.Display)+" at "+cliui.Timestamp(time.Now()),
|
||||
inv.Stdout, "Deleted task "+pretty.Sprint(cliui.DefaultStyles.Keyword, display)+" at "+cliui.Timestamp(time.Now()),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
+41
-17
@@ -56,12 +56,18 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
taskID := uuid.MustParse(id1)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/users/me/workspace/exists":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Workspace{
|
||||
ID: taskID,
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: taskID,
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id1:
|
||||
c.deleteCalls.Add(1)
|
||||
@@ -104,12 +110,18 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
firstID := uuid.MustParse(id3)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/users/me/workspace/first":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Workspace{
|
||||
ID: firstID,
|
||||
Name: "first",
|
||||
OwnerName: "me",
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: firstID,
|
||||
Name: "first",
|
||||
OwnerName: "me",
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/"+id4:
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
|
||||
@@ -139,8 +151,14 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
buildHandler: func(_ *testCounters) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/users/me/workspace/doesnotexist":
|
||||
httpapi.ResourceNotFound(w)
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{},
|
||||
Count: 0,
|
||||
})
|
||||
default:
|
||||
httpapi.InternalServerError(w, xerrors.New("unwanted path: "+r.Method+" "+r.URL.Path))
|
||||
}
|
||||
@@ -156,12 +174,18 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
taskID := uuid.MustParse(id5)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/users/me/workspace/bad":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Workspace{
|
||||
ID: taskID,
|
||||
Name: "bad",
|
||||
OwnerName: "me",
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: taskID,
|
||||
Name: "bad",
|
||||
OwnerName: "me",
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id5:
|
||||
httpapi.InternalServerError(w, xerrors.New("boom"))
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
@@ -98,10 +99,10 @@ func (r *RootCmd) taskList() *serpent.Command {
|
||||
Options: serpent.OptionSet{
|
||||
{
|
||||
Name: "status",
|
||||
Description: "Filter by task status (e.g. running, failed, etc).",
|
||||
Description: "Filter by task status.",
|
||||
Flag: "status",
|
||||
Default: "",
|
||||
Value: serpent.StringOf(&statusFilter),
|
||||
Value: serpent.EnumOf(&statusFilter, slice.ToStrings(codersdk.AllTaskStatuses())...),
|
||||
},
|
||||
{
|
||||
Name: "all",
|
||||
@@ -143,7 +144,7 @@ func (r *RootCmd) taskList() *serpent.Command {
|
||||
|
||||
tasks, err := exp.Tasks(ctx, &codersdk.TasksFilter{
|
||||
Owner: targetUser,
|
||||
Status: statusFilter,
|
||||
Status: codersdk.TaskStatus(statusFilter),
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("list tasks: %w", err)
|
||||
|
||||
+23
-60
@@ -2,7 +2,6 @@ package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"io"
|
||||
@@ -19,9 +18,7 @@ import (
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
@@ -29,7 +26,7 @@ import (
|
||||
)
|
||||
|
||||
// makeAITask creates an AI-task workspace.
|
||||
func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UUID, transition database.WorkspaceTransition, prompt string) (workspace database.WorkspaceTable) {
|
||||
func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UUID, transition database.WorkspaceTransition, prompt string) database.Task {
|
||||
t.Helper()
|
||||
|
||||
tv := dbfake.TemplateVersion(t, db).
|
||||
@@ -42,56 +39,22 @@ func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UU
|
||||
},
|
||||
}).Do()
|
||||
|
||||
ws := database.WorkspaceTable{
|
||||
build := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: tv.Template.ID,
|
||||
}
|
||||
build := dbfake.WorkspaceBuild(t, db, ws).
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{
|
||||
TemplateVersionID: tv.TemplateVersion.ID,
|
||||
Transition: transition,
|
||||
}).WithAgent().Do()
|
||||
dbgen.WorkspaceBuildParameters(t, db, []database.WorkspaceBuildParameter{
|
||||
{
|
||||
WorkspaceBuildID: build.Build.ID,
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: prompt,
|
||||
},
|
||||
})
|
||||
agents, err := db.GetWorkspaceAgentsByWorkspaceAndBuildNumber(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.GetWorkspaceAgentsByWorkspaceAndBuildNumberParams{
|
||||
WorkspaceID: build.Workspace.ID,
|
||||
BuildNumber: build.Build.BuildNumber,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, agents)
|
||||
agentID := agents[0].ID
|
||||
}).
|
||||
WithAgent().
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: prompt,
|
||||
}, nil).
|
||||
Do()
|
||||
|
||||
// Create a workspace app and set it as the sidebar app.
|
||||
app := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
|
||||
AgentID: agentID,
|
||||
Slug: "task-sidebar",
|
||||
DisplayName: "Task Sidebar",
|
||||
External: false,
|
||||
})
|
||||
|
||||
// Update build flags to reference the sidebar app and HasAITask=true.
|
||||
err = db.UpdateWorkspaceBuildFlagsByID(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.UpdateWorkspaceBuildFlagsByIDParams{
|
||||
ID: build.Build.ID,
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
HasExternalAgent: sql.NullBool{Bool: false, Valid: false},
|
||||
SidebarAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
UpdatedAt: build.Build.UpdatedAt,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
return build.Workspace
|
||||
return build.Task
|
||||
}
|
||||
|
||||
func TestExpTaskList(t *testing.T) {
|
||||
@@ -128,7 +91,7 @@ func TestExpTaskList(t *testing.T) {
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
wantPrompt := "build me a web app"
|
||||
ws := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, wantPrompt)
|
||||
task := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, wantPrompt)
|
||||
|
||||
inv, root := clitest.New(t, "exp", "task", "list", "--column", "id,name,status,initial prompt")
|
||||
clitest.SetupConfig(t, memberClient, root)
|
||||
@@ -140,8 +103,8 @@ func TestExpTaskList(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Validate the table includes the task and status.
|
||||
pty.ExpectMatch(ws.Name)
|
||||
pty.ExpectMatch("running")
|
||||
pty.ExpectMatch(task.Name)
|
||||
pty.ExpectMatch("initializing")
|
||||
pty.ExpectMatch(wantPrompt)
|
||||
})
|
||||
|
||||
@@ -154,12 +117,12 @@ func TestExpTaskList(t *testing.T) {
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
// Create two AI tasks: one running, one stopped.
|
||||
running := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "keep me running")
|
||||
stopped := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStop, "stop me please")
|
||||
// Create two AI tasks: one initializing, one paused.
|
||||
initializingTask := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "keep me initializing")
|
||||
pausedTask := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStop, "stop me please")
|
||||
|
||||
// Use JSON output to reliably validate filtering.
|
||||
inv, root := clitest.New(t, "exp", "task", "list", "--status=stopped", "--output=json")
|
||||
inv, root := clitest.New(t, "exp", "task", "list", "--status=paused", "--output=json")
|
||||
clitest.SetupConfig(t, memberClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
@@ -173,10 +136,10 @@ func TestExpTaskList(t *testing.T) {
|
||||
var tasks []codersdk.Task
|
||||
require.NoError(t, json.Unmarshal(stdout.Bytes(), &tasks))
|
||||
|
||||
// Only the stopped task is returned.
|
||||
// Only the paused task is returned.
|
||||
require.Len(t, tasks, 1, "expected one task after filtering")
|
||||
require.Equal(t, stopped.ID, tasks[0].ID)
|
||||
require.NotEqual(t, running.ID, tasks[0].ID)
|
||||
require.Equal(t, pausedTask.ID, tasks[0].ID)
|
||||
require.NotEqual(t, initializingTask.ID, tasks[0].ID)
|
||||
})
|
||||
|
||||
t.Run("UserFlag_Me_Table", func(t *testing.T) {
|
||||
@@ -188,7 +151,7 @@ func TestExpTaskList(t *testing.T) {
|
||||
_, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
_ = makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "other-task")
|
||||
ws := makeAITask(t, db, owner.OrganizationID, owner.UserID, owner.UserID, database.WorkspaceTransitionStart, "me-task")
|
||||
task := makeAITask(t, db, owner.OrganizationID, owner.UserID, owner.UserID, database.WorkspaceTransitionStart, "me-task")
|
||||
|
||||
inv, root := clitest.New(t, "exp", "task", "list", "--user", "me")
|
||||
//nolint:gocritic // Owner client is intended here smoke test the member task not showing up.
|
||||
@@ -200,7 +163,7 @@ func TestExpTaskList(t *testing.T) {
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
pty.ExpectMatch(ws.Name)
|
||||
pty.ExpectMatch(task.Name)
|
||||
})
|
||||
|
||||
t.Run("Quiet", func(t *testing.T) {
|
||||
@@ -213,7 +176,7 @@ func TestExpTaskList(t *testing.T) {
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
// Given: We have two tasks
|
||||
task1 := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "keep me running")
|
||||
task1 := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStart, "keep me active")
|
||||
task2 := makeAITask(t, db, owner.OrganizationID, owner.UserID, memberUser.ID, database.WorkspaceTransitionStop, "stop me please")
|
||||
|
||||
// Given: We add the `--quiet` flag
|
||||
|
||||
+7
-15
@@ -3,7 +3,6 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
@@ -41,24 +40,17 @@ func (r *RootCmd) taskLogs() *serpent.Command {
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = inv.Context()
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
task = inv.Args[0]
|
||||
taskID uuid.UUID
|
||||
ctx = inv.Context()
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
identifier = inv.Args[0]
|
||||
)
|
||||
|
||||
if id, err := uuid.Parse(task); err == nil {
|
||||
taskID = id
|
||||
} else {
|
||||
ws, err := namedWorkspace(ctx, client, task)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", task, err)
|
||||
}
|
||||
|
||||
taskID = ws.ID
|
||||
task, err := exp.TaskByIdentifier(ctx, identifier)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", identifier, err)
|
||||
}
|
||||
|
||||
logs, err := exp.TaskLogs(ctx, codersdk.Me, taskID)
|
||||
logs, err := exp.TaskLogs(ctx, codersdk.Me, task.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get task logs: %w", err)
|
||||
}
|
||||
|
||||
+13
-13
@@ -38,15 +38,15 @@ func Test_TaskLogs(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("ByWorkspaceName_JSON", func(t *testing.T) {
|
||||
t.Run("ByTaskName_JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client // user already has access to their own workspace
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", workspace.Name, "--output", "json")
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", task.Name, "--output", "json")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
@@ -64,15 +64,15 @@ func Test_TaskLogs(t *testing.T) {
|
||||
require.Equal(t, codersdk.TaskLogTypeOutput, logs[1].Type)
|
||||
})
|
||||
|
||||
t.Run("ByWorkspaceID_JSON", func(t *testing.T) {
|
||||
t.Run("ByTaskID_JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", workspace.ID.String(), "--output", "json")
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String(), "--output", "json")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
@@ -90,15 +90,15 @@ func Test_TaskLogs(t *testing.T) {
|
||||
require.Equal(t, codersdk.TaskLogTypeOutput, logs[1].Type)
|
||||
})
|
||||
|
||||
t.Run("ByWorkspaceID_Table", func(t *testing.T) {
|
||||
t.Run("ByTaskID_Table", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", workspace.ID.String())
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String())
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
@@ -112,7 +112,7 @@ func Test_TaskLogs(t *testing.T) {
|
||||
require.Contains(t, output, "output")
|
||||
})
|
||||
|
||||
t.Run("WorkspaceNotFound_ByName", func(t *testing.T) {
|
||||
t.Run("TaskNotFound_ByName", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -130,7 +130,7 @@ func Test_TaskLogs(t *testing.T) {
|
||||
require.ErrorContains(t, err, httpapi.ResourceNotFoundResponse.Message)
|
||||
})
|
||||
|
||||
t.Run("WorkspaceNotFound_ByID", func(t *testing.T) {
|
||||
t.Run("TaskNotFound_ByID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -152,10 +152,10 @@ func Test_TaskLogs(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", workspace.ID.String())
|
||||
inv, root := clitest.New(t, "exp", "task", "logs", task.ID.String())
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
|
||||
+7
-15
@@ -3,7 +3,6 @@ package cli
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -39,12 +38,11 @@ func (r *RootCmd) taskSend() *serpent.Command {
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = inv.Context()
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
task = inv.Args[0]
|
||||
ctx = inv.Context()
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
identifier = inv.Args[0]
|
||||
|
||||
taskInput string
|
||||
taskID uuid.UUID
|
||||
)
|
||||
|
||||
if stdin {
|
||||
@@ -62,18 +60,12 @@ func (r *RootCmd) taskSend() *serpent.Command {
|
||||
taskInput = inv.Args[1]
|
||||
}
|
||||
|
||||
if id, err := uuid.Parse(task); err == nil {
|
||||
taskID = id
|
||||
} else {
|
||||
ws, err := namedWorkspace(ctx, client, task)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task: %w", err)
|
||||
}
|
||||
|
||||
taskID = ws.ID
|
||||
task, err := exp.TaskByIdentifier(ctx, identifier)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task: %w", err)
|
||||
}
|
||||
|
||||
if err = exp.TaskSend(ctx, codersdk.Me, taskID, codersdk.TaskSendRequest{Input: taskInput}); err != nil {
|
||||
if err = exp.TaskSend(ctx, codersdk.Me, task.ID, codersdk.TaskSendRequest{Input: taskInput}); err != nil {
|
||||
return xerrors.Errorf("send input to task: %w", err)
|
||||
}
|
||||
|
||||
|
||||
+13
-13
@@ -22,15 +22,15 @@ import (
|
||||
func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("ByWorkspaceName_WithArgument", func(t *testing.T) {
|
||||
t.Run("ByTaskName_WithArgument", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "send", workspace.Name, "carry on with the task")
|
||||
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "carry on with the task")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
@@ -38,15 +38,15 @@ func Test_TaskSend(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("ByWorkspaceID_WithArgument", func(t *testing.T) {
|
||||
t.Run("ByTaskID_WithArgument", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "send", workspace.ID.String(), "carry on with the task")
|
||||
inv, root := clitest.New(t, "exp", "task", "send", task.ID.String(), "carry on with the task")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
@@ -54,15 +54,15 @@ func Test_TaskSend(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("ByWorkspaceName_WithStdin", func(t *testing.T) {
|
||||
t.Run("ByTaskName_WithStdin", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "send", workspace.Name, "--stdin")
|
||||
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "--stdin")
|
||||
inv.Stdout = &stdout
|
||||
inv.Stdin = strings.NewReader("carry on with the task")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
@@ -71,7 +71,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("WorkspaceNotFound_ByName", func(t *testing.T) {
|
||||
t.Run("TaskNotFound_ByName", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -89,7 +89,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
require.ErrorContains(t, err, httpapi.ResourceNotFoundResponse.Message)
|
||||
})
|
||||
|
||||
t.Run("WorkspaceNotFound_ByID", func(t *testing.T) {
|
||||
t.Run("TaskNotFound_ByID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -111,10 +111,10 @@ func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userClient, workspace := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
userClient, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "exp", "task", "send", workspace.Name, "some task input")
|
||||
inv, root := clitest.New(t, "exp", "task", "send", task.Name, "some task input")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
|
||||
+22
-31
@@ -5,7 +5,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
@@ -84,21 +83,10 @@ func (r *RootCmd) taskStatus() *serpent.Command {
|
||||
}
|
||||
|
||||
ctx := i.Context()
|
||||
ec := codersdk.NewExperimentalClient(client)
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
identifier := i.Args[0]
|
||||
|
||||
taskID, err := uuid.Parse(identifier)
|
||||
if err != nil {
|
||||
// Try to resolve the task as a named workspace
|
||||
// TODO: right now tasks are still "workspaces" under the hood.
|
||||
// We should update this once we have a proper task model.
|
||||
ws, err := namedWorkspace(ctx, client, identifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
taskID = ws.ID
|
||||
}
|
||||
task, err := ec.TaskByID(ctx, taskID)
|
||||
task, err := exp.TaskByIdentifier(ctx, identifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -119,7 +107,7 @@ func (r *RootCmd) taskStatus() *serpent.Command {
|
||||
// TODO: implement streaming updates instead of polling
|
||||
lastStatusRow := tsr
|
||||
for range t.C {
|
||||
task, err := ec.TaskByID(ctx, taskID)
|
||||
task, err := exp.TaskByID(ctx, task.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -152,7 +140,7 @@ func (r *RootCmd) taskStatus() *serpent.Command {
|
||||
}
|
||||
|
||||
func taskWatchIsEnded(task codersdk.Task) bool {
|
||||
if task.Status == codersdk.WorkspaceStatusStopped {
|
||||
if task.WorkspaceStatus == codersdk.WorkspaceStatusStopped {
|
||||
return true
|
||||
}
|
||||
if task.WorkspaceAgentHealth == nil || !task.WorkspaceAgentHealth.Healthy {
|
||||
@@ -168,28 +156,21 @@ func taskWatchIsEnded(task codersdk.Task) bool {
|
||||
}
|
||||
|
||||
type taskStatusRow struct {
|
||||
codersdk.Task `table:"-"`
|
||||
ChangedAgo string `json:"-" table:"state changed,default_sort"`
|
||||
Timestamp time.Time `json:"-" table:"-"`
|
||||
TaskStatus string `json:"-" table:"status"`
|
||||
Healthy bool `json:"-" table:"healthy"`
|
||||
TaskState string `json:"-" table:"state"`
|
||||
Message string `json:"-" table:"message"`
|
||||
codersdk.Task `table:"r,recursive_inline"`
|
||||
ChangedAgo string `json:"-" table:"state changed"`
|
||||
Healthy bool `json:"-" table:"healthy"`
|
||||
}
|
||||
|
||||
func taskStatusRowEqual(r1, r2 taskStatusRow) bool {
|
||||
return r1.TaskStatus == r2.TaskStatus &&
|
||||
return r1.Status == r2.Status &&
|
||||
r1.Healthy == r2.Healthy &&
|
||||
r1.TaskState == r2.TaskState &&
|
||||
r1.Message == r2.Message
|
||||
taskStateEqual(r1.CurrentState, r2.CurrentState)
|
||||
}
|
||||
|
||||
func toStatusRow(task codersdk.Task) taskStatusRow {
|
||||
tsr := taskStatusRow{
|
||||
Task: task,
|
||||
ChangedAgo: time.Since(task.UpdatedAt).Truncate(time.Second).String() + " ago",
|
||||
Timestamp: task.UpdatedAt,
|
||||
TaskStatus: string(task.Status),
|
||||
}
|
||||
tsr.Healthy = task.WorkspaceAgentHealth != nil &&
|
||||
task.WorkspaceAgentHealth.Healthy &&
|
||||
@@ -199,9 +180,19 @@ func toStatusRow(task codersdk.Task) taskStatusRow {
|
||||
|
||||
if task.CurrentState != nil {
|
||||
tsr.ChangedAgo = time.Since(task.CurrentState.Timestamp).Truncate(time.Second).String() + " ago"
|
||||
tsr.Timestamp = task.CurrentState.Timestamp
|
||||
tsr.TaskState = string(task.CurrentState.State)
|
||||
tsr.Message = task.CurrentState.Message
|
||||
}
|
||||
return tsr
|
||||
}
|
||||
|
||||
func taskStateEqual(se1, se2 *codersdk.TaskStateEntry) bool {
|
||||
var s1, m1, s2, m2 string
|
||||
if se1 != nil {
|
||||
s1 = string(se1.State)
|
||||
m1 = se1.Message
|
||||
}
|
||||
if se2 != nil {
|
||||
s2 = string(se2.State)
|
||||
m2 = se2.Message
|
||||
}
|
||||
return s1 == s2 && m1 == m2
|
||||
}
|
||||
|
||||
+149
-69
@@ -36,26 +36,17 @@ func Test_TaskStatus(t *testing.T) {
|
||||
hf: func(ctx context.Context, _ time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/users/me/workspace/doesnotexist":
|
||||
httpapi.ResourceNotFound(w)
|
||||
default:
|
||||
t.Errorf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
args: []string{"err-fetching-workspace"},
|
||||
expectError: assert.AnError.Error(),
|
||||
hf: func(ctx context.Context, _ time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/users/me/workspace/err-fetching-workspace":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Workspace{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
})
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
httpapi.InternalServerError(w, assert.AnError)
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{},
|
||||
Count: 0,
|
||||
})
|
||||
return
|
||||
}
|
||||
default:
|
||||
t.Errorf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
@@ -64,21 +55,45 @@ func Test_TaskStatus(t *testing.T) {
|
||||
},
|
||||
{
|
||||
args: []string{"exists"},
|
||||
expectOutput: `STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
0s ago running true working Thinking furiously...`,
|
||||
expectOutput: `STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
0s ago active true working Thinking furiously...`,
|
||||
hf: func(ctx context.Context, now time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/users/me/workspace/exists":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Workspace{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
})
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: now,
|
||||
Message: "Thinking furiously...",
|
||||
},
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: now,
|
||||
@@ -88,7 +103,9 @@ func Test_TaskStatus(t *testing.T) {
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
})
|
||||
return
|
||||
default:
|
||||
t.Errorf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
@@ -97,50 +114,77 @@ func Test_TaskStatus(t *testing.T) {
|
||||
},
|
||||
{
|
||||
args: []string{"exists", "--watch"},
|
||||
expectOutput: `
|
||||
STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
4s ago running true
|
||||
3s ago running true working Reticulating splines...
|
||||
2s ago running true complete Splines reticulated successfully!`,
|
||||
expectOutput: `STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
5s ago pending true
|
||||
4s ago initializing true
|
||||
4s ago active true
|
||||
3s ago active true working Reticulating splines...
|
||||
2s ago active true complete Splines reticulated successfully!`,
|
||||
hf: func(ctx context.Context, now time.Time) func(http.ResponseWriter, *http.Request) {
|
||||
var calls atomic.Int64
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
defer calls.Add(1)
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/users/me/workspace/exists":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Workspace{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
})
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
// Return initial task state for --watch test
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusPending,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-5 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusPending,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
defer calls.Add(1)
|
||||
switch calls.Load() {
|
||||
case 0:
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusPending,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-5 * time.Second),
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusInitializing,
|
||||
})
|
||||
return
|
||||
case 1:
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
})
|
||||
return
|
||||
case 2:
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
@@ -150,13 +194,15 @@ STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
Timestamp: now.Add(-3 * time.Second),
|
||||
Message: "Reticulating splines...",
|
||||
},
|
||||
Status: codersdk.TaskStatusActive,
|
||||
})
|
||||
return
|
||||
case 3:
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-4 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
@@ -166,13 +212,16 @@ STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
Timestamp: now.Add(-2 * time.Second),
|
||||
Message: "Splines reticulated successfully!",
|
||||
},
|
||||
Status: codersdk.TaskStatusActive,
|
||||
})
|
||||
return
|
||||
default:
|
||||
httpapi.InternalServerError(w, xerrors.New("too many calls!"))
|
||||
return
|
||||
}
|
||||
default:
|
||||
httpapi.InternalServerError(w, xerrors.Errorf("unexpected path: %q", r.URL.Path))
|
||||
return
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -183,19 +232,24 @@ STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
"id": "11111111-1111-1111-1111-111111111111",
|
||||
"organization_id": "00000000-0000-0000-0000-000000000000",
|
||||
"owner_id": "00000000-0000-0000-0000-000000000000",
|
||||
"owner_name": "",
|
||||
"name": "",
|
||||
"owner_name": "me",
|
||||
"name": "exists",
|
||||
"template_id": "00000000-0000-0000-0000-000000000000",
|
||||
"template_version_id": "00000000-0000-0000-0000-000000000000",
|
||||
"template_name": "",
|
||||
"template_display_name": "",
|
||||
"template_icon": "",
|
||||
"workspace_id": null,
|
||||
"workspace_name": "",
|
||||
"workspace_status": "running",
|
||||
"workspace_agent_id": null,
|
||||
"workspace_agent_lifecycle": null,
|
||||
"workspace_agent_health": null,
|
||||
"workspace_agent_lifecycle": "ready",
|
||||
"workspace_agent_health": {
|
||||
"healthy": true
|
||||
},
|
||||
"workspace_app_id": null,
|
||||
"initial_prompt": "",
|
||||
"status": "running",
|
||||
"status": "active",
|
||||
"current_state": {
|
||||
"timestamp": "2025-08-26T12:34:57Z",
|
||||
"state": "working",
|
||||
@@ -205,26 +259,52 @@ STATE CHANGED STATUS HEALTHY STATE MESSAGE
|
||||
"created_at": "2025-08-26T12:34:56Z",
|
||||
"updated_at": "2025-08-26T12:34:56Z"
|
||||
}`,
|
||||
hf: func(ctx context.Context, _ time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
hf: func(ctx context.Context, now time.Time) func(http.ResponseWriter, *http.Request) {
|
||||
ts := time.Date(2025, 8, 26, 12, 34, 56, 0, time.UTC)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/users/me/workspace/exists":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Workspace{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
})
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: ts.Add(time.Second),
|
||||
Message: "Thinking furiously...",
|
||||
},
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Status: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: ts.Add(time.Second),
|
||||
Message: "Thinking furiously...",
|
||||
},
|
||||
Status: codersdk.TaskStatusActive,
|
||||
})
|
||||
return
|
||||
default:
|
||||
t.Errorf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
|
||||
+230
-10
@@ -2,26 +2,242 @@ package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/provisionersdk/proto"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// This test performs an integration-style test for tasks functionality.
|
||||
//
|
||||
//nolint:tparallel // The sub-tests of this test must be run sequentially.
|
||||
func Test_Tasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: a template configured for tasks
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitLong)
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner = coderdtest.CreateFirstUser(t, client)
|
||||
userClient, _ = coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
initMsg = agentapisdk.Message{
|
||||
Content: "test task input for " + t.Name(),
|
||||
Id: 0,
|
||||
Role: "user",
|
||||
Time: time.Now().UTC(),
|
||||
}
|
||||
authToken = uuid.NewString()
|
||||
echoAgentAPI = startFakeAgentAPI(t, fakeAgentAPIEcho(ctx, t, initMsg, "hello"))
|
||||
taskTpl = createAITaskTemplate(t, client, owner.OrganizationID, withAgentToken(authToken), withSidebarURL(echoAgentAPI.URL()))
|
||||
taskName = strings.ReplaceAll(testutil.GetRandomName(t), "_", "-")
|
||||
)
|
||||
|
||||
//nolint:paralleltest // The sub-tests of this test must be run sequentially.
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
cmdArgs []string
|
||||
assertFn func(stdout string, userClient *codersdk.Client)
|
||||
}{
|
||||
{
|
||||
name: "create task",
|
||||
cmdArgs: []string{"exp", "task", "create", "test task input for " + t.Name(), "--name", taskName, "--template", taskTpl.Name},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
require.Contains(t, stdout, taskName, "task name should be in output")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "list tasks after create",
|
||||
cmdArgs: []string{"exp", "task", "list", "--output", "json"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
var tasks []codersdk.Task
|
||||
err := json.NewDecoder(strings.NewReader(stdout)).Decode(&tasks)
|
||||
require.NoError(t, err, "list output should unmarshal properly")
|
||||
require.Len(t, tasks, 1, "expected one task")
|
||||
require.Equal(t, taskName, tasks[0].Name, "task name should match")
|
||||
require.Equal(t, initMsg.Content, tasks[0].InitialPrompt, "initial prompt should match")
|
||||
require.True(t, tasks[0].WorkspaceID.Valid, "workspace should be created")
|
||||
// For the next test, we need to wait for the workspace to be healthy
|
||||
ws := coderdtest.MustWorkspace(t, userClient, tasks[0].WorkspaceID.UUID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(authToken))
|
||||
_ = agenttest.New(t, client.URL, authToken, func(o *agent.Options) {
|
||||
o.Client = agentClient
|
||||
})
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, userClient, tasks[0].WorkspaceID.UUID).WithContext(ctx).WaitFor(coderdtest.AgentsReady)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get task status after create",
|
||||
cmdArgs: []string{"exp", "task", "status", taskName, "--output", "json"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
var task codersdk.Task
|
||||
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
|
||||
require.Equal(t, task.Name, taskName, "task name should match")
|
||||
require.Equal(t, codersdk.TaskStatusActive, task.Status, "task should be active")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "send task message",
|
||||
cmdArgs: []string{"exp", "task", "send", taskName, "hello"},
|
||||
// Assertions for this happen in the fake agent API handler.
|
||||
},
|
||||
{
|
||||
name: "read task logs",
|
||||
cmdArgs: []string{"exp", "task", "logs", taskName, "--output", "json"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
var logs []codersdk.TaskLogEntry
|
||||
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&logs), "should unmarshal task logs")
|
||||
require.Len(t, logs, 3, "should have 3 logs")
|
||||
require.Equal(t, logs[0].Content, initMsg.Content, "first message should be the init message")
|
||||
require.Equal(t, logs[0].Type, codersdk.TaskLogTypeInput, "first message should be an input")
|
||||
require.Equal(t, logs[1].Content, "hello", "second message should be the sent message")
|
||||
require.Equal(t, logs[1].Type, codersdk.TaskLogTypeInput, "second message should be an input")
|
||||
require.Equal(t, logs[2].Content, "hello", "third message should be the echoed message")
|
||||
require.Equal(t, logs[2].Type, codersdk.TaskLogTypeOutput, "third message should be an output")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delete task",
|
||||
cmdArgs: []string{"exp", "task", "delete", taskName, "--yes"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
// The task should eventually no longer show up in the list of tasks
|
||||
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
|
||||
expClient := codersdk.NewExperimentalClient(userClient)
|
||||
tasks, err := expClient.Tasks(ctx, &codersdk.TasksFilter{})
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
return slices.IndexFunc(tasks, func(task codersdk.Task) bool {
|
||||
return task.Name == taskName
|
||||
}) == -1
|
||||
}, testutil.IntervalMedium)
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, tc.cmdArgs...)
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
require.NoError(t, inv.WithContext(ctx).Run())
|
||||
if tc.assertFn != nil {
|
||||
tc.assertFn(stdout.String(), userClient)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAgentAPIEcho(ctx context.Context, t testing.TB, initMsg agentapisdk.Message, want ...string) map[string]http.HandlerFunc {
|
||||
t.Helper()
|
||||
var mmu sync.RWMutex
|
||||
msgs := []agentapisdk.Message{initMsg}
|
||||
wantCpy := make([]string, len(want))
|
||||
copy(wantCpy, want)
|
||||
t.Cleanup(func() {
|
||||
mmu.Lock()
|
||||
defer mmu.Unlock()
|
||||
if !t.Failed() {
|
||||
assert.Empty(t, wantCpy, "not all expected messages received: missing %v", wantCpy)
|
||||
}
|
||||
})
|
||||
writeAgentAPIError := func(w http.ResponseWriter, err error, status int) {
|
||||
w.WriteHeader(status)
|
||||
_ = json.NewEncoder(w).Encode(agentapisdk.ErrorModel{
|
||||
Errors: ptr.Ref([]agentapisdk.ErrorDetail{
|
||||
{
|
||||
Message: ptr.Ref(err.Error()),
|
||||
},
|
||||
}),
|
||||
})
|
||||
}
|
||||
return map[string]http.HandlerFunc{
|
||||
"/status": func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(agentapisdk.GetStatusResponse{
|
||||
Status: "stable",
|
||||
})
|
||||
},
|
||||
"/messages": func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
mmu.RLock()
|
||||
defer mmu.RUnlock()
|
||||
bs, err := json.Marshal(agentapisdk.GetMessagesResponse{
|
||||
Messages: msgs,
|
||||
})
|
||||
if err != nil {
|
||||
writeAgentAPIError(w, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_, _ = w.Write(bs)
|
||||
},
|
||||
"/message": func(w http.ResponseWriter, r *http.Request) {
|
||||
mmu.Lock()
|
||||
defer mmu.Unlock()
|
||||
var params agentapisdk.PostMessageParams
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
err := json.NewDecoder(r.Body).Decode(¶ms)
|
||||
if !assert.NoError(t, err, "decode message") {
|
||||
writeAgentAPIError(w, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if len(wantCpy) == 0 {
|
||||
assert.Fail(t, "unexpected message", "received message %v, but no more expected messages", params)
|
||||
writeAgentAPIError(w, xerrors.New("no more expected messages"), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
exp := wantCpy[0]
|
||||
wantCpy = wantCpy[1:]
|
||||
|
||||
if !assert.Equal(t, exp, params.Content, "message content mismatch") {
|
||||
writeAgentAPIError(w, xerrors.New("unexpected message content: expected "+exp+", got "+params.Content), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
msgs = append(msgs, agentapisdk.Message{
|
||||
Id: int64(len(msgs) + 1),
|
||||
Content: params.Content,
|
||||
Role: agentapisdk.RoleUser,
|
||||
Time: time.Now().UTC(),
|
||||
})
|
||||
msgs = append(msgs, agentapisdk.Message{
|
||||
Id: int64(len(msgs) + 1),
|
||||
Content: params.Content,
|
||||
Role: agentapisdk.RoleAgent,
|
||||
Time: time.Now().UTC(),
|
||||
})
|
||||
assert.NoError(t, json.NewEncoder(w).Encode(agentapisdk.PostMessageResponse{
|
||||
Ok: true,
|
||||
}))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// setupCLITaskTest creates a test workspace with an AI task template and agent,
|
||||
// with a fake agent API configured with the provided set of handlers.
|
||||
// Returns the user client and workspace.
|
||||
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (*codersdk.Client, codersdk.Workspace) {
|
||||
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (*codersdk.Client, codersdk.Task) {
|
||||
t.Helper()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
@@ -34,11 +250,18 @@ func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[st
|
||||
template := createAITaskTemplate(t, client, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
|
||||
|
||||
wantPrompt := "test prompt"
|
||||
workspace := coderdtest.CreateWorkspace(t, userClient, template.ID, func(req *codersdk.CreateWorkspaceRequest) {
|
||||
req.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: wantPrompt},
|
||||
}
|
||||
exp := codersdk.NewExperimentalClient(userClient)
|
||||
task, err := exp.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: wantPrompt,
|
||||
Name: "test-task",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the task's underlying workspace to be built
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
workspace, err := userClient.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(authToken))
|
||||
@@ -49,7 +272,7 @@ func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[st
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).
|
||||
WaitFor(coderdtest.AgentsReady)
|
||||
|
||||
return userClient, workspace
|
||||
return userClient, task
|
||||
}
|
||||
|
||||
// createAITaskTemplate creates a template configured for AI tasks with a sidebar app.
|
||||
@@ -70,7 +293,6 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
},
|
||||
},
|
||||
@@ -105,9 +327,7 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
},
|
||||
AiTasks: []*proto.AITask{
|
||||
{
|
||||
SidebarApp: &proto.AITaskSidebarApp{
|
||||
Id: taskAppID.String(),
|
||||
},
|
||||
AppId: taskAppID.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -0,0 +1,355 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
)
|
||||
|
||||
// mockKeyring is a mock sessionstore.Backend implementation.
|
||||
type mockKeyring struct {
|
||||
credentials map[string]string // service name -> credential
|
||||
}
|
||||
|
||||
const mockServiceName = "mock-service-name"
|
||||
|
||||
func newMockKeyring() *mockKeyring {
|
||||
return &mockKeyring{credentials: make(map[string]string)}
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Read(_ *url.URL) (string, error) {
|
||||
cred, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Write(_ *url.URL, token string) error {
|
||||
m.credentials[mockServiceName] = token
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Delete(_ *url.URL) error {
|
||||
_, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
delete(m.credentials, mockServiceName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestUseKeyring(t *testing.T) {
|
||||
// Verify that the --use-keyring flag opts into using a keyring backend for
|
||||
// storing session tokens instead of plain text files.
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Login", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Create CLI invocation with --use-keyring flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend before running the command
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
// Run login in background
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Provide the token when prompted
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring instead)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring")
|
||||
|
||||
// Verify that the credential IS stored in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in mock keyring")
|
||||
require.Equal(t, client.SessionToken(), cred, "stored token should match login token")
|
||||
})
|
||||
|
||||
t.Run("Logout", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First, login with --use-keyring
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend
|
||||
var loginRoot cli.RootCmd
|
||||
loginCmd, err := loginRoot.Command(loginRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
loginRoot.WithSessionStorageBackend(mockBackend)
|
||||
loginInv.Command = loginCmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify credential exists in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "read credential should succeed before logout")
|
||||
require.NotEmpty(t, cred, "credential should exist after logout")
|
||||
|
||||
// Now run logout with --use-keyring
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
// Inject the same mock backend
|
||||
var logoutRoot cli.RootCmd
|
||||
logoutCmd, err := logoutRoot.Command(logoutRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
logoutRoot.WithSessionStorageBackend(mockBackend)
|
||||
logoutInv.Command = logoutCmd
|
||||
|
||||
var logoutOut bytes.Buffer
|
||||
logoutInv.Stdout = &logoutOut
|
||||
|
||||
err = logoutInv.Run()
|
||||
require.NoError(t, err, "logout should succeed")
|
||||
|
||||
// Verify the credential was deleted from mock keyring
|
||||
_, err = mockBackend.Read(nil)
|
||||
require.ErrorIs(t, err, os.ErrNotExist, "credential should be deleted from keyring after logout")
|
||||
})
|
||||
|
||||
t.Run("OmitFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// --use-keyring flag omitted (should use file-based storage)
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file WAS created (not using keyring)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err := os.Stat(sessionFile)
|
||||
require.NoError(t, err, "session file should exist when NOT using --use-keyring")
|
||||
|
||||
// Read and verify the token from file
|
||||
content, err := os.ReadFile(sessionFile)
|
||||
require.NoError(t, err, "should be able to read session file")
|
||||
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
|
||||
})
|
||||
|
||||
t.Run("EnvironmentVariable", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Login using CODER_USE_KEYRING environment variable instead of flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Environ.Set("CODER_USE_KEYRING", "true")
|
||||
|
||||
// Inject the mock backend
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring via env var)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring via env var")
|
||||
|
||||
// Verify credential is in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in keyring when CODER_USE_KEYRING=true")
|
||||
require.NotEmpty(t, cred)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUseKeyringUnsupportedOS(t *testing.T) {
|
||||
// Verify that trying to use --use-keyring on an unsupported operating system produces
|
||||
// a helpful error message.
|
||||
t.Parallel()
|
||||
|
||||
// Skip on Windows since the keyring is actually supported.
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Skipping unsupported OS test on Windows where keyring is supported")
|
||||
}
|
||||
|
||||
const expMessage = "keyring storage is not supported on this operating system; remove the --use-keyring flag"
|
||||
|
||||
t.Run("LoginWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Try to login with --use-keyring on an unsupported OS
|
||||
inv, _ := clitest.New(t,
|
||||
"login",
|
||||
"--use-keyring",
|
||||
client.URL.String(),
|
||||
)
|
||||
|
||||
// The error should occur immediately, before any prompts
|
||||
loginErr := inv.Run()
|
||||
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, loginErr)
|
||||
require.Contains(t, loginErr.Error(), expMessage)
|
||||
})
|
||||
|
||||
t.Run("LogoutWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First login without keyring to create a session
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Now try to logout with --use-keyring on an unsupported OS
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
err := logoutInv.Run()
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), expMessage)
|
||||
})
|
||||
}
|
||||
+24
-5
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/coderd/userpassword"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
@@ -114,9 +115,11 @@ func (r *RootCmd) loginWithPassword(
|
||||
}
|
||||
|
||||
sessionToken := resp.SessionToken
|
||||
config := r.createConfig()
|
||||
err = config.Session().Write(sessionToken)
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
|
||||
@@ -149,11 +152,15 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
useTokenForSession bool
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Long: "By default, the session token is stored in a plain text file. Use the " +
|
||||
"--use-keyring flag or set CODER_USE_KEYRING=true to store the token in " +
|
||||
"the operating system keyring instead.",
|
||||
Middleware: serpent.RequireRangeArgs(0, 1),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
rawURL := ""
|
||||
var urlSource string
|
||||
|
||||
@@ -198,6 +205,15 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check keyring availability before prompting the user for a token to fail fast.
|
||||
if r.useKeyring {
|
||||
backend := r.ensureTokenBackend()
|
||||
_, err := backend.Read(client.URL)
|
||||
if err != nil && xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
}
|
||||
|
||||
hasFirstUser, err := client.HasFirstUser(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to check server %q for first user, is the URL correct and is coder accessible from your browser? Error - has initial user: %w", serverURL.String(), err)
|
||||
@@ -394,8 +410,11 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
}
|
||||
|
||||
config := r.createConfig()
|
||||
err = config.Session().Write(sessionToken)
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
err = config.URL().Write(serverURL.String())
|
||||
|
||||
+8
-3
@@ -8,6 +8,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
@@ -46,11 +47,15 @@ func (r *RootCmd) logout() *serpent.Command {
|
||||
errors = append(errors, xerrors.Errorf("remove URL file: %w", err))
|
||||
}
|
||||
|
||||
err = config.Session().Delete()
|
||||
err = r.ensureTokenBackend().Delete(client.URL)
|
||||
// Only throw error if the session configuration file is present,
|
||||
// otherwise the user is already logged out, and we proceed
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, xerrors.Errorf("remove session file: %w", err))
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
errors = append(errors, errKeyringNotSupported)
|
||||
} else {
|
||||
errors = append(errors, xerrors.Errorf("remove session token: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
err = config.Organization().Delete()
|
||||
|
||||
+91
-25
@@ -37,6 +37,7 @@ import (
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/gitauth"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/cli/telemetry"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -54,6 +55,8 @@ var (
|
||||
// ErrSilent is a sentinel error that tells the command handler to just exit with a non-zero error, but not print
|
||||
// anything.
|
||||
ErrSilent = xerrors.New("silent error")
|
||||
|
||||
errKeyringNotSupported = xerrors.New("keyring storage is not supported on this operating system; remove the --use-keyring flag to use file-based storage")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,12 +71,14 @@ const (
|
||||
varVerbose = "verbose"
|
||||
varDisableDirect = "disable-direct-connections"
|
||||
varDisableNetworkTelemetry = "disable-network-telemetry"
|
||||
varUseKeyring = "use-keyring"
|
||||
|
||||
notLoggedInMessage = "You are not logged in. Try logging in using '%s login <url>'."
|
||||
|
||||
envNoVersionCheck = "CODER_NO_VERSION_WARNING"
|
||||
envNoFeatureWarning = "CODER_NO_FEATURE_WARNING"
|
||||
envSessionToken = "CODER_SESSION_TOKEN"
|
||||
envUseKeyring = "CODER_USE_KEYRING"
|
||||
//nolint:gosec
|
||||
envAgentToken = "CODER_AGENT_TOKEN"
|
||||
//nolint:gosec
|
||||
@@ -474,6 +479,15 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
Value: serpent.BoolOf(&r.disableNetworkTelemetry),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: varUseKeyring,
|
||||
Env: envUseKeyring,
|
||||
Description: "Store and retrieve session tokens using the operating system " +
|
||||
"keyring. Currently only supported on Windows. By default, tokens are " +
|
||||
"stored in plain text files.",
|
||||
Value: serpent.BoolOf(&r.useKeyring),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: "debug-http",
|
||||
Description: "Debug codersdk HTTP requests.",
|
||||
@@ -508,6 +522,7 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
type RootCmd struct {
|
||||
clientURL *url.URL
|
||||
token string
|
||||
tokenBackend sessionstore.Backend
|
||||
globalConfig string
|
||||
header []string
|
||||
headerCommand string
|
||||
@@ -522,6 +537,7 @@ type RootCmd struct {
|
||||
disableNetworkTelemetry bool
|
||||
noVersionCheck bool
|
||||
noFeatureWarning bool
|
||||
useKeyring bool
|
||||
}
|
||||
|
||||
// InitClient creates and configures a new client with authentication, telemetry,
|
||||
@@ -549,14 +565,19 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
r.token, err = conf.Session().Read()
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Configure HTTP client with transport wrappers
|
||||
@@ -588,7 +609,6 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
// This allows commands to run without requiring authentication, but still use auth if available.
|
||||
func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, error) {
|
||||
conf := r.createConfig()
|
||||
var err error
|
||||
// Read the client URL stored on disk.
|
||||
if r.clientURL == nil || r.clientURL.String() == "" {
|
||||
rawURL, err := conf.URL().Read()
|
||||
@@ -605,14 +625,19 @@ func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, erro
|
||||
}
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
r.token, err = conf.Session().Read()
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Only configure the client if we have a URL
|
||||
@@ -688,6 +713,24 @@ func (r *RootCmd) createUnauthenticatedClient(ctx context.Context, serverURL *ur
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// ensureTokenBackend returns the session token storage backend, creating it if necessary.
|
||||
// This must be called after flags are parsed so we can respect the value of the --use-keyring
|
||||
// flag.
|
||||
func (r *RootCmd) ensureTokenBackend() sessionstore.Backend {
|
||||
if r.tokenBackend == nil {
|
||||
if r.useKeyring {
|
||||
r.tokenBackend = sessionstore.NewKeyring()
|
||||
} else {
|
||||
r.tokenBackend = sessionstore.NewFile(r.createConfig)
|
||||
}
|
||||
}
|
||||
return r.tokenBackend
|
||||
}
|
||||
|
||||
func (r *RootCmd) WithSessionStorageBackend(backend sessionstore.Backend) {
|
||||
r.tokenBackend = backend
|
||||
}
|
||||
|
||||
type AgentAuth struct {
|
||||
// Agent Client config
|
||||
agentToken string
|
||||
@@ -1318,14 +1361,37 @@ func SlimUnsupported(w io.Writer, cmd string) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func defaultUpgradeMessage(version string) string {
|
||||
// Our installation script doesn't work on Windows, so instead we direct the user
|
||||
// to the GitHub release page to download the latest installer.
|
||||
version = strings.TrimPrefix(version, "v")
|
||||
if runtime.GOOS == "windows" {
|
||||
return fmt.Sprintf("download the server version from: https://github.com/coder/coder/releases/v%s", version)
|
||||
// defaultUpgradeMessage builds an appropriate upgrade message for the platform.
|
||||
// Precedence:
|
||||
// 1. If a custom upgrade message is provided by the server, use it.
|
||||
// 2. If the server provides a dashboard URL (v2.19+) and the platform is not Windows,
|
||||
// recommend the site-local install.sh.
|
||||
// 3. On Windows, point to the tagged GitHub release page where binaries are published.
|
||||
// 4. Otherwise, recommend the global install.sh with explicit version.
|
||||
func defaultUpgradeMessage(version, dashboardURL, customUpgradeMessage string) string {
|
||||
if customUpgradeMessage != "" {
|
||||
return customUpgradeMessage
|
||||
}
|
||||
return fmt.Sprintf("download the server version with: 'curl -L https://coder.com/install.sh | sh -s -- --version %s'", version)
|
||||
|
||||
// Ensure canonical semver for comparisons and display
|
||||
canonical := semver.Canonical(version)
|
||||
trimmed := strings.TrimPrefix(canonical, "v")
|
||||
|
||||
// The site-local `install.sh` was introduced in v2.19.0.
|
||||
if dashboardURL != "" && semver.Compare(semver.MajorMinor(canonical), "v2.19") >= 0 {
|
||||
// The site-local install.sh is only valid for macOS and Linux.
|
||||
if runtime.GOOS != "windows" {
|
||||
return fmt.Sprintf("download %s with: 'curl -fsSL %s/install.sh | sh'", canonical, dashboardURL)
|
||||
}
|
||||
// Fall through to Windows-specific suggestion below.
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// Link directly to the release page; Windows binaries are published there.
|
||||
return fmt.Sprintf("download the server version from: https://github.com/coder/coder/releases/v%s", trimmed)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("download the server version with: 'curl -L https://coder.com/install.sh | sh -s -- --version %s'", trimmed)
|
||||
}
|
||||
|
||||
// wrapTransportWithEntitlementsCheck adds a middleware to the HTTP transport
|
||||
@@ -1364,19 +1430,19 @@ func wrapTransportWithVersionMismatchCheck(rt http.RoundTripper, inv *serpent.In
|
||||
if buildinfo.VersionsMatch(clientVersion, serverVersion) {
|
||||
return
|
||||
}
|
||||
upgradeMessage := defaultUpgradeMessage(semver.Canonical(serverVersion))
|
||||
var dashboardURL, customUpgradeMessage string
|
||||
if serverInfo, err := getBuildInfo(inv.Context()); err == nil {
|
||||
switch {
|
||||
case serverInfo.UpgradeMessage != "":
|
||||
upgradeMessage = serverInfo.UpgradeMessage
|
||||
// The site-local `install.sh` was introduced in v2.19.0
|
||||
case serverInfo.DashboardURL != "" && semver.Compare(semver.MajorMinor(serverVersion), "v2.19") >= 0:
|
||||
upgradeMessage = fmt.Sprintf("download %s with: 'curl -fsSL %s/install.sh | sh'", serverVersion, serverInfo.DashboardURL)
|
||||
}
|
||||
dashboardURL = serverInfo.DashboardURL
|
||||
customUpgradeMessage = serverInfo.UpgradeMessage
|
||||
}
|
||||
fmtWarningText := "version mismatch: client %s, server %s\n%s"
|
||||
fmtWarn := pretty.Sprint(cliui.DefaultStyles.Warn, fmtWarningText)
|
||||
warning := fmt.Sprintf(fmtWarn, clientVersion, serverVersion, upgradeMessage)
|
||||
upgradeMessage := defaultUpgradeMessage(serverVersion, dashboardURL, customUpgradeMessage)
|
||||
warning := pretty.Sprintf(
|
||||
cliui.DefaultStyles.Warn,
|
||||
"version mismatch: client %s, server %s\n%s",
|
||||
clientVersion,
|
||||
serverVersion,
|
||||
upgradeMessage,
|
||||
)
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, warning)
|
||||
})
|
||||
|
||||
@@ -176,6 +176,22 @@ func (r *RootCmd) scheduleStart() *serpent.Command {
|
||||
}
|
||||
|
||||
schedStr = ptr.Ref(sched.String())
|
||||
|
||||
// Check if the template has autostart requirements that may conflict
|
||||
// with the user's schedule.
|
||||
template, err := client.Template(inv.Context(), workspace.TemplateID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template: %w", err)
|
||||
}
|
||||
|
||||
if len(template.AutostartRequirement.DaysOfWeek) > 0 {
|
||||
_, _ = fmt.Fprintf(
|
||||
inv.Stderr,
|
||||
"Warning: your workspace template restricts autostart to the following days: %s.\n"+
|
||||
"Your workspace may only autostart on these days.\n",
|
||||
strings.Join(template.AutostartRequirement.DaysOfWeek, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
err = client.UpdateWorkspaceAutostart(inv.Context(), workspace.ID, codersdk.UpdateWorkspaceAutostartRequest{
|
||||
|
||||
@@ -373,3 +373,67 @@ func TestScheduleOverride(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest // t.Setenv
|
||||
func TestScheduleStart_TemplateAutostartRequirement(t *testing.T) {
|
||||
t.Setenv("TZ", "UTC")
|
||||
loc, err := tz.TimezoneIANA()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "UTC", loc.String())
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
// Update template to have autostart requirement
|
||||
// Note: In AGPL, this will be ignored and all days will be allowed (enterprise feature).
|
||||
template, err = client.UpdateTemplateMeta(context.Background(), template.ID, codersdk.UpdateTemplateMeta{
|
||||
AutostartRequirement: &codersdk.TemplateAutostartRequirement{
|
||||
DaysOfWeek: []string{"monday", "wednesday", "friday"},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the template - in AGPL, AutostartRequirement will have all days (enterprise feature)
|
||||
template, err = client.Template(context.Background(), template.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, template.AutostartRequirement.DaysOfWeek, "template should have autostart requirement days")
|
||||
|
||||
workspace := coderdtest.CreateWorkspace(t, client, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
t.Run("ShowsWarning", func(t *testing.T) {
|
||||
// When: user sets autostart schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "start", workspace.Name, "9:30AM", "Mon-Fri",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: warning should be shown
|
||||
// In AGPL, this will show all days (enterprise feature defaults to all days allowed)
|
||||
pty.ExpectMatch("Warning")
|
||||
pty.ExpectMatch("may only autostart")
|
||||
})
|
||||
|
||||
t.Run("NoWarningWhenManual", func(t *testing.T) {
|
||||
// When: user sets manual schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "start", workspace.Name, "manual",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
var stderrBuf bytes.Buffer
|
||||
inv.Stderr = &stderrBuf
|
||||
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: no warning should be shown on stderr
|
||||
stderrOutput := stderrBuf.String()
|
||||
require.NotContains(t, stderrOutput, "Warning")
|
||||
})
|
||||
}
|
||||
|
||||
+75
-39
@@ -29,6 +29,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
@@ -1377,6 +1378,7 @@ func IsLocalURL(ctx context.Context, u *url.URL) (bool, error) {
|
||||
}
|
||||
|
||||
func shutdownWithTimeout(shutdown func(context.Context) error, timeout time.Duration) error {
|
||||
// nolint:gocritic // The magic number is parameterized.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
return shutdown(ctx)
|
||||
@@ -2134,50 +2136,83 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
|
||||
return "", nil, xerrors.New("The built-in PostgreSQL cannot run as the root user. Create a non-root user and run again!")
|
||||
}
|
||||
|
||||
// Ensure a password and port have been generated!
|
||||
connectionURL, err := embeddedPostgresURL(cfg)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
pgPassword, err := cfg.PostgresPassword().Read()
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("read postgres password: %w", err)
|
||||
}
|
||||
pgPortRaw, err := cfg.PostgresPort().Read()
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("read postgres port: %w", err)
|
||||
}
|
||||
pgPort, err := strconv.ParseUint(pgPortRaw, 10, 16)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("parse postgres port: %w", err)
|
||||
}
|
||||
|
||||
cachePath := filepath.Join(cfg.PostgresPath(), "cache")
|
||||
if customCacheDir != "" {
|
||||
cachePath = filepath.Join(customCacheDir, "postgres")
|
||||
}
|
||||
stdlibLogger := slog.Stdlib(ctx, logger.Named("postgres"), slog.LevelDebug)
|
||||
ep := embeddedpostgres.NewDatabase(
|
||||
embeddedpostgres.DefaultConfig().
|
||||
Version(embeddedpostgres.V13).
|
||||
BinariesPath(filepath.Join(cfg.PostgresPath(), "bin")).
|
||||
// Default BinaryRepositoryURL repo1.maven.org is flaky.
|
||||
BinaryRepositoryURL("https://repo.maven.apache.org/maven2").
|
||||
DataPath(filepath.Join(cfg.PostgresPath(), "data")).
|
||||
RuntimePath(filepath.Join(cfg.PostgresPath(), "runtime")).
|
||||
CachePath(cachePath).
|
||||
Username("coder").
|
||||
Password(pgPassword).
|
||||
Database("coder").
|
||||
Encoding("UTF8").
|
||||
Port(uint32(pgPort)).
|
||||
Logger(stdlibLogger.Writer()),
|
||||
)
|
||||
err = ep.Start()
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("Failed to start built-in PostgreSQL. Optionally, specify an external deployment with `--postgres-url`: %w", err)
|
||||
|
||||
// If the port is not defined, an available port will be found dynamically.
|
||||
maxAttempts := 1
|
||||
_, err = cfg.PostgresPort().Read()
|
||||
retryPortDiscovery := errors.Is(err, os.ErrNotExist) && testing.Testing()
|
||||
if retryPortDiscovery {
|
||||
// There is no way to tell Postgres to use an ephemeral port, so in order to avoid
|
||||
// flaky tests in CI we need to retry EmbeddedPostgres.Start in case of a race
|
||||
// condition where the port we quickly listen on and close in embeddedPostgresURL()
|
||||
// is not free by the time the embedded postgres starts up. This maximum_should
|
||||
// cover most cases where port conflicts occur in CI and cause flaky tests.
|
||||
maxAttempts = 3
|
||||
}
|
||||
return connectionURL, ep.Stop, nil
|
||||
|
||||
var startErr error
|
||||
for attempt := 0; attempt < maxAttempts; attempt++ {
|
||||
// Ensure a password and port have been generated.
|
||||
connectionURL, err := embeddedPostgresURL(cfg)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
pgPassword, err := cfg.PostgresPassword().Read()
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("read postgres password: %w", err)
|
||||
}
|
||||
pgPortRaw, err := cfg.PostgresPort().Read()
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("read postgres port: %w", err)
|
||||
}
|
||||
pgPort, err := strconv.ParseUint(pgPortRaw, 10, 16)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("parse postgres port: %w", err)
|
||||
}
|
||||
|
||||
ep := embeddedpostgres.NewDatabase(
|
||||
embeddedpostgres.DefaultConfig().
|
||||
Version(embeddedpostgres.V13).
|
||||
BinariesPath(filepath.Join(cfg.PostgresPath(), "bin")).
|
||||
// Default BinaryRepositoryURL repo1.maven.org is flaky.
|
||||
BinaryRepositoryURL("https://repo.maven.apache.org/maven2").
|
||||
DataPath(filepath.Join(cfg.PostgresPath(), "data")).
|
||||
RuntimePath(filepath.Join(cfg.PostgresPath(), "runtime")).
|
||||
CachePath(cachePath).
|
||||
Username("coder").
|
||||
Password(pgPassword).
|
||||
Database("coder").
|
||||
Encoding("UTF8").
|
||||
Port(uint32(pgPort)).
|
||||
Logger(stdlibLogger.Writer()),
|
||||
)
|
||||
|
||||
startErr = ep.Start()
|
||||
if startErr == nil {
|
||||
return connectionURL, ep.Stop, nil
|
||||
}
|
||||
|
||||
logger.Warn(ctx, "failed to start embedded postgres",
|
||||
slog.F("attempt", attempt+1),
|
||||
slog.F("max_attempts", maxAttempts),
|
||||
slog.F("port", pgPort),
|
||||
slog.Error(startErr),
|
||||
)
|
||||
|
||||
if retryPortDiscovery {
|
||||
// Since a retry is needed, we wipe the port stored here at the beginning of the loop.
|
||||
_ = cfg.PostgresPort().Delete()
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil, xerrors.Errorf("failed to start built-in PostgreSQL after %d attempts. "+
|
||||
"Optionally, specify an external deployment. See https://coder.com/docs/tutorials/external-database "+
|
||||
"for more details: %w", maxAttempts, startErr)
|
||||
}
|
||||
|
||||
func ConfigureHTTPClient(ctx context.Context, clientCertFile, clientKeyFile string, tlsClientCAFile string) (context.Context, *http.Client, error) {
|
||||
@@ -2286,7 +2321,7 @@ func ConnectToPostgres(ctx context.Context, logger slog.Logger, driver string, d
|
||||
var err error
|
||||
var sqlDB *sql.DB
|
||||
dbNeedsClosing := true
|
||||
// Try to connect for 30 seconds.
|
||||
// nolint:gocritic // Try to connect for 30 seconds.
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
@@ -2382,6 +2417,7 @@ func ConnectToPostgres(ctx context.Context, logger slog.Logger, driver string, d
|
||||
}
|
||||
|
||||
func pingPostgres(ctx context.Context, db *sql.DB) error {
|
||||
// nolint:gocritic // This is a reasonable magic number for a ping timeout.
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
return db.PingContext(ctx)
|
||||
|
||||
@@ -17,9 +17,6 @@ import (
|
||||
|
||||
func TestRegenerateVapidKeypair(t *testing.T) {
|
||||
t.Parallel()
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test is only supported on postgres")
|
||||
}
|
||||
|
||||
t.Run("NoExistingVAPIDKeys", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -348,9 +348,6 @@ func TestServer(t *testing.T) {
|
||||
|
||||
runGitHubProviderTest := func(t *testing.T, tc testCase) {
|
||||
t.Parallel()
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("test requires postgres")
|
||||
}
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(testutil.Context(t, testutil.WaitLong))
|
||||
defer cancelFunc()
|
||||
@@ -2142,10 +2139,6 @@ func TestServerYAMLConfig(t *testing.T) {
|
||||
func TestConnectToPostgres(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test does not make sense without postgres")
|
||||
}
|
||||
|
||||
t.Run("Migrate", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -2256,10 +2249,6 @@ type runServerOpts struct {
|
||||
func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
telemetryServerURL, deployment, snapshot := mockTelemetryServer(t)
|
||||
dbConnURL, err := dbtestutil.Open(t)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -0,0 +1,239 @@
|
||||
// Package sessionstore provides CLI session token storage mechanisms.
|
||||
// Operating system keyring storage is intended to have compatibility with other Coder
|
||||
// applications (e.g. Coder Desktop, Coder provider for JetBrains Toolbox, etc) so that
|
||||
// applications can read/write the same credential stored in the keyring.
|
||||
//
|
||||
// Note that we aren't using an existing Go package zalando/go-keyring here for a few
|
||||
// reasons. 1) It prescribes the format of the target credential name in the OS keyrings,
|
||||
// which makes our life difficult for compatibility with other Coder applications. 2)
|
||||
// It uses init functions that make it difficult to test with. As a result, the OS
|
||||
// keyring implementations may be adapted from zalando/go-keyring source (i.e. Windows).
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
)
|
||||
|
||||
// Backend is a storage backend for session tokens.
|
||||
type Backend interface {
|
||||
// Read returns the session token for the given server URL or an error, if any. It
|
||||
// will return os.ErrNotExist if no token exists for the given URL.
|
||||
Read(serverURL *url.URL) (string, error)
|
||||
// Write stores the session token for the given server URL.
|
||||
Write(serverURL *url.URL, token string) error
|
||||
// Delete removes the session token for the given server URL or an error, if any.
|
||||
// It will return os.ErrNotExist error if no token exists to delete.
|
||||
Delete(serverURL *url.URL) error
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
// ErrSetDataTooBig is returned if `keyringProvider.Set` was called with too much data.
|
||||
// On macOS: The combination of service, username & password should not exceed ~3000 bytes
|
||||
// On Windows: The service is limited to 32KiB while the password is limited to 2560 bytes
|
||||
ErrSetDataTooBig = xerrors.New("data passed to Set was too big")
|
||||
|
||||
// ErrNotImplemented represents when keyring usage is not implemented on the current
|
||||
// operating system.
|
||||
ErrNotImplemented = xerrors.New("not implemented")
|
||||
)
|
||||
|
||||
// keyringProvider represents an operating system keyring. The expectation
|
||||
// is these methods operate on the user/login keyring.
|
||||
type keyringProvider interface {
|
||||
// Set stores the given credential for a service name in the operating system
|
||||
// keyring.
|
||||
Set(service, credential string) error
|
||||
// Get retrieves the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Get(service string) ([]byte, error)
|
||||
// Delete deletes the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Delete(service string) error
|
||||
}
|
||||
|
||||
// credential represents a single credential entry.
|
||||
type credential struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
|
||||
// credentialsMap represents the JSON structure stored in the operating system keyring.
|
||||
// It supports storing multiple credentials for different server URLs.
|
||||
type credentialsMap map[string]credential
|
||||
|
||||
// normalizeHost returns a normalized version of the URL host for use as a map key.
|
||||
func normalizeHost(u *url.URL) (string, error) {
|
||||
if u == nil || u.Host == "" {
|
||||
return "", xerrors.New("nil server URL")
|
||||
}
|
||||
return strings.TrimSpace(strings.ToLower(u.Host)), nil
|
||||
}
|
||||
|
||||
// parseCredentialsJSON parses the JSON from the keyring into a credentialsMap.
|
||||
func parseCredentialsJSON(jsonData []byte) (credentialsMap, error) {
|
||||
if len(jsonData) == 0 {
|
||||
return make(credentialsMap), nil
|
||||
}
|
||||
|
||||
var creds credentialsMap
|
||||
if err := json.Unmarshal(jsonData, &creds); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// Keyring is a Backend that exclusively stores the session token in the operating
|
||||
// system keyring. Happy path usage of this type should start with NewKeyring.
|
||||
// It stores a JSON object in the keyring that supports multiple credentials for
|
||||
// different server URLs, providing compatibility with Coder Desktop and other Coder
|
||||
// applications.
|
||||
type Keyring struct {
|
||||
provider keyringProvider
|
||||
serviceName string
|
||||
}
|
||||
|
||||
// NewKeyring creates a Keyring with the default service name for production use.
|
||||
func NewKeyring() Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: defaultServiceName,
|
||||
}
|
||||
}
|
||||
|
||||
// NewKeyringWithService creates a Keyring Backend that stores credentials under the
|
||||
// specified service name. This is primarily intended for testing to avoid conflicts
|
||||
// with production credentials and collisions between tests.
|
||||
func NewKeyringWithService(serviceName string) Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (o Keyring) Read(serverURL *url.URL) (string, error) {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
credJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(credJSON) == 0 {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(credJSON)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("read: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Return the credential for the specified URL
|
||||
cred, ok := creds[host]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred.APIToken, nil
|
||||
}
|
||||
|
||||
func (o Keyring) Write(serverURL *url.URL, token string) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return xerrors.Errorf("read existing credentials: %w", err)
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Upsert the credential for this URL.
|
||||
creds[host] = credential{
|
||||
CoderURL: host,
|
||||
APIToken: token,
|
||||
}
|
||||
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
err = o.provider.Set(o.serviceName, string(credsJSON))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write credentials to keyring: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o Keyring) Delete(serverURL *url.URL) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
if _, ok := creds[host]; !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
delete(creds, host)
|
||||
|
||||
// Delete the entire keyring entry when no credentials remain.
|
||||
if len(creds) == 0 {
|
||||
return o.provider.Delete(o.serviceName)
|
||||
}
|
||||
|
||||
// Write back the updated credentials map.
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return o.provider.Set(o.serviceName, string(credsJSON))
|
||||
}
|
||||
|
||||
// File is a Backend that exclusively stores the session token in a file on disk.
|
||||
type File struct {
|
||||
config func() config.Root
|
||||
}
|
||||
|
||||
func NewFile(f func() config.Root) *File {
|
||||
return &File{config: f}
|
||||
}
|
||||
|
||||
func (f *File) Read(_ *url.URL) (string, error) {
|
||||
return f.config().Session().Read()
|
||||
}
|
||||
|
||||
func (f *File) Write(_ *url.URL, token string) error {
|
||||
return f.config().Session().Write(token)
|
||||
}
|
||||
|
||||
func (f *File) Delete(_ *url.URL) error {
|
||||
return f.config().Session().Delete()
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNormalizeHost(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
url *url.URL
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "StandardHost",
|
||||
url: &url.URL{Host: "coder.example.com"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithPort",
|
||||
url: &url.URL{Host: "coder.example.com:8080"},
|
||||
want: "coder.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "UppercaseHost",
|
||||
url: &url.URL{Host: "CODER.EXAMPLE.COM"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithWhitespace",
|
||||
url: &url.URL{Host: " coder.example.com "},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "NilURL",
|
||||
url: nil,
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "EmptyHost",
|
||||
url: &url.URL{Host: ""},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := normalizeHost(tt.url)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCredentialsJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Empty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
creds, err := parseCredentialsJSON(nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, creds)
|
||||
require.Empty(t, creds)
|
||||
})
|
||||
|
||||
t.Run("NewFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{
|
||||
"coder1.example.com": {"coder_url": "coder1.example.com", "api_token": "token1"},
|
||||
"coder2.example.com": {"coder_url": "coder2.example.com", "api_token": "token2"}
|
||||
}`)
|
||||
creds, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, creds, 2)
|
||||
require.Equal(t, "token1", creds["coder1.example.com"].APIToken)
|
||||
require.Equal(t, "token2", creds["coder2.example.com"].APIToken)
|
||||
})
|
||||
|
||||
t.Run("InvalidJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{invalid json}`)
|
||||
_, err := parseCredentialsJSON(jsonData)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCredentialsMap_RoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
creds := credentialsMap{
|
||||
"coder1.example.com": {
|
||||
CoderURL: "coder1.example.com",
|
||||
APIToken: "token1",
|
||||
},
|
||||
"coder2.example.com:8080": {
|
||||
CoderURL: "coder2.example.com:8080",
|
||||
APIToken: "token2",
|
||||
},
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
parsed, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, creds, parsed)
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
//go:build !windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
const defaultServiceName = "not-implemented"
|
||||
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(_, _ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(_ string) ([]byte, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(_ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
@@ -0,0 +1,342 @@
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
// Generate a test service name for use with the OS keyring. It uses a combination
|
||||
// of the test name and a nanosecond timestamp to prevent collisions.
|
||||
func keyringTestServiceName(t *testing.T) string {
|
||||
t.Helper()
|
||||
return t.Name() + "_" + fmt.Sprintf("%v", time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func TestKeyring(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("linux and darwin are not supported yet")
|
||||
}
|
||||
|
||||
// This test exercises use of the operating system keyring. As a result,
|
||||
// the operating system keyring is expected to be available.
|
||||
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist when reading non-existent token")
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected os.ErrNotExist when deleting non-existent token")
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
dir := t.TempDir()
|
||||
expSessionFile := path.Join(dir, "session")
|
||||
|
||||
const inputToken = "test-keyring-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify no session file was created (keyring stores in OS keyring, not file)
|
||||
_, err = os.Stat(expSessionFile)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected session token file to not exist when using keyring")
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
const inputToken = "test-keyring-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist after deleting token")
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-keyring-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-keyring-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("MultipleServers", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srvURL)
|
||||
_ = backend.Delete(srvURL2)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-for-server-1"
|
||||
err = backend.Write(srvURL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1)
|
||||
const token2 = "token-for-server-2"
|
||||
err = backend.Write(srvURL2, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read server 1's credential
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
// Read server 2's credential
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Delete server 1's credential
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify server 1's credential is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// Verify server 2's credential still exists
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Clean up remaining credentials
|
||||
err = backend.Delete(srvURL2)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Parallel()
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the session file was created
|
||||
sessionFile := config.Root(dir).Session()
|
||||
require.True(t, sessionFile.Exists())
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token was written
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Attempt to delete a non-existent token
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteIgnoresURL", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with first URL test token
|
||||
const firstToken = "token-for-url1"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with second URL - should overwrite
|
||||
const secondToken = "token-for-url2"
|
||||
err = backend.Write(srvURL2, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have the second token (File backend doesn't differentiate by URL)
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultServiceName is the service name used in the Windows Credential Manager
|
||||
// for storing Coder CLI session tokens.
|
||||
defaultServiceName = "coder-v2-credentials"
|
||||
)
|
||||
|
||||
// operatingSystemKeyring implements keyringProvider and uses Windows Credential Manager.
|
||||
// It is largely adapted from the zalando/go-keyring package.
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(service, credential string) error {
|
||||
// password may not exceed 2560 bytes (https://github.com/jaraco/keyring/issues/540#issuecomment-968329967)
|
||||
if len(credential) > 2560 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 512 bytes (might need more testing)
|
||||
if len(service) >= 512 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 32k but problems occur before that
|
||||
// so we limit it to 30k
|
||||
if len(service) > 1024*30 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
cred := wincred.NewGenericCredential(service)
|
||||
cred.CredentialBlob = []byte(credential)
|
||||
cred.Persist = wincred.PersistLocalMachine
|
||||
return cred.Write()
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(service string) ([]byte, error) {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return cred.CredentialBlob, nil
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(service string) error {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
return err
|
||||
}
|
||||
return cred.Delete()
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
func TestWindowsKeyring_WriteReadDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL = "http://127.0.0.1:1337"
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Verify no token exists initially
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the credential is stored in Windows Credential Manager with correct format
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Verify the stored values
|
||||
require.Len(t, storedCreds, 1)
|
||||
cred, ok := storedCreds[srvURL.Host]
|
||||
require.True(t, ok, "credential for URL should exist")
|
||||
require.Equal(t, inputToken, cred.APIToken)
|
||||
require.Equal(t, srvURL.Host, cred.CoderURL)
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify token is deleted
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestWindowsKeyring_MultipleServers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL1 = "http://127.0.0.1:1337"
|
||||
srv1URL, err := url.Parse(testURL1)
|
||||
require.NoError(t, err)
|
||||
|
||||
const testURL2 = "http://127.0.0.1:1338"
|
||||
srv2URL, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srv1URL)
|
||||
_ = backend.Delete(srv2URL)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-server-1"
|
||||
err = backend.Write(srv1URL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1's token)
|
||||
const token2 = "token-server-2"
|
||||
err = backend.Write(srv2URL, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify both credentials are stored in Windows Credential Manager
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Both credentials should exist
|
||||
require.Len(t, storedCreds, 2)
|
||||
require.Equal(t, token1, storedCreds[srv1URL.Host].APIToken)
|
||||
require.Equal(t, token2, storedCreds[srv2URL.Host].APIToken)
|
||||
|
||||
// Read individual credentials
|
||||
token, err := backend.Read(srv1URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
token, err = backend.Read(srv2URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Cleanup
|
||||
err = backend.Delete(srv1URL)
|
||||
require.NoError(t, err)
|
||||
err = backend.Delete(srv2URL)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -54,6 +54,7 @@ func TestSharingShare(t *testing.T) {
|
||||
MinimalUser: codersdk.MinimalUser{
|
||||
ID: toShareWithUser.ID,
|
||||
Username: toShareWithUser.Username,
|
||||
Name: toShareWithUser.Name,
|
||||
AvatarURL: toShareWithUser.AvatarURL,
|
||||
},
|
||||
Role: codersdk.WorkspaceRole("use"),
|
||||
@@ -103,6 +104,7 @@ func TestSharingShare(t *testing.T) {
|
||||
MinimalUser: codersdk.MinimalUser{
|
||||
ID: toShareWithUser1.ID,
|
||||
Username: toShareWithUser1.Username,
|
||||
Name: toShareWithUser1.Name,
|
||||
AvatarURL: toShareWithUser1.AvatarURL,
|
||||
},
|
||||
Role: codersdk.WorkspaceRoleUse,
|
||||
@@ -111,6 +113,7 @@ func TestSharingShare(t *testing.T) {
|
||||
MinimalUser: codersdk.MinimalUser{
|
||||
ID: toShareWithUser2.ID,
|
||||
Username: toShareWithUser2.Username,
|
||||
Name: toShareWithUser2.Name,
|
||||
AvatarURL: toShareWithUser2.AvatarURL,
|
||||
},
|
||||
Role: codersdk.WorkspaceRoleUse,
|
||||
@@ -155,6 +158,7 @@ func TestSharingShare(t *testing.T) {
|
||||
MinimalUser: codersdk.MinimalUser{
|
||||
ID: toShareWithUser.ID,
|
||||
Username: toShareWithUser.Username,
|
||||
Name: toShareWithUser.Name,
|
||||
AvatarURL: toShareWithUser.AvatarURL,
|
||||
},
|
||||
Role: codersdk.WorkspaceRoleAdmin,
|
||||
|
||||
+47
@@ -109,6 +109,51 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
}
|
||||
},
|
||||
),
|
||||
CompletionHandler: func(inv *serpent.Invocation) []string {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
res, err := client.Workspaces(inv.Context(), codersdk.WorkspaceFilter{
|
||||
Owner: codersdk.Me,
|
||||
})
|
||||
if err != nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
var mu sync.Mutex
|
||||
var completions []string
|
||||
var wg sync.WaitGroup
|
||||
for _, ws := range res.Workspaces {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
resources, err := client.TemplateVersionResources(inv.Context(), ws.LatestBuild.TemplateVersionID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var agents []codersdk.WorkspaceAgent
|
||||
for _, resource := range resources {
|
||||
agents = append(agents, resource.Agents...)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if len(agents) == 1 {
|
||||
completions = append(completions, ws.Name)
|
||||
} else {
|
||||
for _, agent := range agents {
|
||||
completions = append(completions, fmt.Sprintf("%s.%s", ws.Name, agent.Name))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
slices.Sort(completions)
|
||||
return completions
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) (retErr error) {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
@@ -906,6 +951,8 @@ func GetWorkspaceAndAgent(ctx context.Context, inv *serpent.Invocation, client *
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with active template version: %w", err)
|
||||
}
|
||||
_, _ = fmt.Fprintln(inv.Stdout, "Unable to start the workspace with template version from last build. Your workspace has been updated to the current active template version.")
|
||||
default:
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with current template version: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with current template version: %w", err)
|
||||
|
||||
@@ -2447,3 +2447,99 @@ func tempDirUnixSocket(t *testing.T) string {
|
||||
|
||||
return t.TempDir()
|
||||
}
|
||||
|
||||
func TestSSH_Completion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("SingleAgent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
_ = agenttest.New(t, client.URL, agentToken)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, root := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For single-agent workspaces, the only completion should be the
|
||||
// bare workspace name.
|
||||
output := stdout.String()
|
||||
t.Logf("Completion output: %q", output)
|
||||
require.Contains(t, output, workspace.Name)
|
||||
})
|
||||
|
||||
t.Run("MultiAgent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, store := coderdtest.NewWithDatabase(t, nil)
|
||||
first := coderdtest.CreateFirstUser(t, client)
|
||||
userClient, user := coderdtest.CreateAnotherUserMutators(t, client, first.OrganizationID, nil, func(r *codersdk.CreateUserRequestWithOrgs) {
|
||||
r.Username = "multiuser"
|
||||
})
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, store, database.WorkspaceTable{
|
||||
Name: "multiworkspace",
|
||||
OrganizationID: first.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
return []*proto.Agent{
|
||||
{
|
||||
Name: "agent1",
|
||||
Auth: &proto.Agent_Token{},
|
||||
},
|
||||
{
|
||||
Name: "agent2",
|
||||
Auth: &proto.Agent_Token{},
|
||||
},
|
||||
}
|
||||
}).Do()
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, root := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For multi-agent workspaces, completions should include the
|
||||
// workspace.agent format but NOT the bare workspace name.
|
||||
output := stdout.String()
|
||||
t.Logf("Completion output: %q", output)
|
||||
lines := strings.Split(strings.TrimSpace(output), "\n")
|
||||
require.NotContains(t, lines, r.Workspace.Name)
|
||||
require.Contains(t, output, r.Workspace.Name+".agent1")
|
||||
require.Contains(t, output, r.Workspace.Name+".agent2")
|
||||
})
|
||||
|
||||
t.Run("NetworkError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, _ := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
output := stdout.String()
|
||||
require.Empty(t, output)
|
||||
})
|
||||
}
|
||||
|
||||
Vendored
+5
@@ -108,6 +108,11 @@ variables or flags.
|
||||
--url url, $CODER_URL
|
||||
URL to a deployment.
|
||||
|
||||
--use-keyring bool, $CODER_USE_KEYRING
|
||||
Store and retrieve session tokens using the operating system keyring.
|
||||
Currently only supported on Windows. By default, tokens are stored in
|
||||
plain text files.
|
||||
|
||||
-v, --verbose bool, $CODER_VERBOSE
|
||||
Enable verbose output.
|
||||
|
||||
|
||||
+2
-1
@@ -90,6 +90,7 @@
|
||||
"allow_renames": false,
|
||||
"favorite": false,
|
||||
"next_start_at": "====[timestamp]=====",
|
||||
"is_prebuild": false
|
||||
"is_prebuild": false,
|
||||
"task_id": null
|
||||
}
|
||||
]
|
||||
|
||||
+4
@@ -5,6 +5,10 @@ USAGE:
|
||||
|
||||
Authenticate with Coder deployment
|
||||
|
||||
By default, the session token is stored in a plain text file. Use the
|
||||
--use-keyring flag or set CODER_USE_KEYRING=true to store the token in the
|
||||
operating system keyring instead.
|
||||
|
||||
OPTIONS:
|
||||
--first-user-email string, $CODER_FIRST_USER_EMAIL
|
||||
Specifies an email address to use if creating the first user for the
|
||||
|
||||
+35
@@ -80,6 +80,41 @@ OPTIONS:
|
||||
Periodically check for new releases of Coder and inform the owner. The
|
||||
check is performed once per day.
|
||||
|
||||
AIBRIDGE OPTIONS:
|
||||
--aibridge-anthropic-base-url string, $CODER_AIBRIDGE_ANTHROPIC_BASE_URL (default: https://api.anthropic.com/)
|
||||
The base URL of the Anthropic API.
|
||||
|
||||
--aibridge-anthropic-key string, $CODER_AIBRIDGE_ANTHROPIC_KEY
|
||||
The key to authenticate against the Anthropic API.
|
||||
|
||||
--aibridge-bedrock-access-key string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY
|
||||
The access key to authenticate against the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-access-key-secret string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET
|
||||
The access key secret to use with the access key to authenticate
|
||||
against the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-model string, $CODER_AIBRIDGE_BEDROCK_MODEL (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0)
|
||||
The model to use when making requests to the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-region string, $CODER_AIBRIDGE_BEDROCK_REGION
|
||||
The AWS Bedrock API region.
|
||||
|
||||
--aibridge-bedrock-small-fastmodel string, $CODER_AIBRIDGE_BEDROCK_SMALL_FAST_MODEL (default: global.anthropic.claude-haiku-4-5-20251001-v1:0)
|
||||
The small fast model to use when making requests to the AWS Bedrock
|
||||
API. Claude Code uses Haiku-class models to perform background tasks.
|
||||
See
|
||||
https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
|
||||
|
||||
--aibridge-enabled bool, $CODER_AIBRIDGE_ENABLED (default: false)
|
||||
Whether to start an in-memory aibridged instance.
|
||||
|
||||
--aibridge-openai-base-url string, $CODER_AIBRIDGE_OPENAI_BASE_URL (default: https://api.openai.com/v1/)
|
||||
The base URL of the OpenAI API.
|
||||
|
||||
--aibridge-openai-key string, $CODER_AIBRIDGE_OPENAI_KEY
|
||||
The key to authenticate against the OpenAI API.
|
||||
|
||||
CLIENT OPTIONS:
|
||||
These options change the behavior of how clients interact with the Coder.
|
||||
Clients include the Coder CLI, Coder Desktop, IDE extensions, and the web UI.
|
||||
|
||||
+5
@@ -16,6 +16,10 @@ USAGE:
|
||||
|
||||
$ coder tokens ls
|
||||
|
||||
- Create a scoped token:
|
||||
|
||||
$ coder tokens create --scope workspace:read --allow workspace:<uuid>
|
||||
|
||||
- Remove a token by ID:
|
||||
|
||||
$ coder tokens rm WuoWs4ZsMX
|
||||
@@ -24,6 +28,7 @@ SUBCOMMANDS:
|
||||
create Create a token
|
||||
list List tokens
|
||||
remove Delete a token
|
||||
view Display detailed information about a token
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+9
-1
@@ -6,12 +6,20 @@ USAGE:
|
||||
Create a token
|
||||
|
||||
OPTIONS:
|
||||
--allow allow-list
|
||||
Repeatable allow-list entry (<type>:<uuid>, e.g. workspace:1234-...).
|
||||
|
||||
--lifetime string, $CODER_TOKEN_LIFETIME
|
||||
Specify a duration for the lifetime of the token.
|
||||
Duration for the token lifetime. Supports standard Go duration units
|
||||
(ns, us, ms, s, m, h) plus d (days) and y (years). Examples: 8h, 30d,
|
||||
1y, 1d12h30m.
|
||||
|
||||
-n, --name string, $CODER_TOKEN_NAME
|
||||
Specify a human-readable name.
|
||||
|
||||
--scope string-array
|
||||
Repeatable scope to attach to the token (e.g. workspace:read).
|
||||
|
||||
-u, --user string, $CODER_TOKEN_USER
|
||||
Specify the user to create the token for (Only works if logged in user
|
||||
is admin).
|
||||
|
||||
+1
-1
@@ -12,7 +12,7 @@ OPTIONS:
|
||||
Specifies whether all users' tokens will be listed or not (must have
|
||||
Owner role to see all tokens).
|
||||
|
||||
-c, --column [id|name|last used|expires at|created at|owner] (default: id,name,last used,expires at,created at)
|
||||
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
|
||||
+16
@@ -0,0 +1,16 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder tokens view [flags] <name|id>
|
||||
|
||||
Display detailed information about a token
|
||||
|
||||
OPTIONS:
|
||||
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at,owner)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
+1
-1
@@ -8,7 +8,7 @@ USAGE:
|
||||
Aliases: ls
|
||||
|
||||
OPTIONS:
|
||||
-c, --column [id|username|email|created at|updated at|status] (default: username,email,created at,status)
|
||||
-c, --column [id|username|name|email|created at|updated at|status] (default: username,email,created at,status)
|
||||
Columns to display in table output.
|
||||
|
||||
--github-user-id int
|
||||
|
||||
+21
-4
@@ -714,8 +714,7 @@ workspace_prebuilds:
|
||||
# (default: 3, type: int)
|
||||
failure_hard_limit: 3
|
||||
aibridge:
|
||||
# Whether to start an in-memory aibridged instance ("aibridge" experiment must be
|
||||
# enabled, too).
|
||||
# Whether to start an in-memory aibridged instance.
|
||||
# (default: false, type: bool)
|
||||
enabled: false
|
||||
# The base URL of the OpenAI API.
|
||||
@@ -726,7 +725,25 @@ aibridge:
|
||||
openai_key: ""
|
||||
# The base URL of the Anthropic API.
|
||||
# (default: https://api.anthropic.com/, type: string)
|
||||
base_url: https://api.anthropic.com/
|
||||
anthropic_base_url: https://api.anthropic.com/
|
||||
# The key to authenticate against the Anthropic API.
|
||||
# (default: <unset>, type: string)
|
||||
key: ""
|
||||
anthropic_key: ""
|
||||
# The AWS Bedrock API region.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_region: ""
|
||||
# The access key to authenticate against the AWS Bedrock API.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_access_key: ""
|
||||
# The access key secret to use with the access key to authenticate against the AWS
|
||||
# Bedrock API.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_access_key_secret: ""
|
||||
# The model to use when making requests to the AWS Bedrock API.
|
||||
# (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0, type: string)
|
||||
bedrock_model: global.anthropic.claude-sonnet-4-5-20250929-v1:0
|
||||
# The small fast model to use when making requests to the AWS Bedrock API. Claude
|
||||
# Code uses Haiku-class models to perform background tasks. See
|
||||
# https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
|
||||
# (default: global.anthropic.claude-haiku-4-5-20251001-v1:0, type: string)
|
||||
bedrock_small_fast_model: global.anthropic.claude-haiku-4-5-20251001-v1:0
|
||||
|
||||
+104
-6
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
@@ -27,6 +29,10 @@ func (r *RootCmd) tokens() *serpent.Command {
|
||||
Description: "List your tokens",
|
||||
Command: "coder tokens ls",
|
||||
},
|
||||
Example{
|
||||
Description: "Create a scoped token",
|
||||
Command: "coder tokens create --scope workspace:read --allow workspace:<uuid>",
|
||||
},
|
||||
Example{
|
||||
Description: "Remove a token by ID",
|
||||
Command: "coder tokens rm WuoWs4ZsMX",
|
||||
@@ -39,6 +45,7 @@ func (r *RootCmd) tokens() *serpent.Command {
|
||||
Children: []*serpent.Command{
|
||||
r.createToken(),
|
||||
r.listTokens(),
|
||||
r.viewToken(),
|
||||
r.removeToken(),
|
||||
},
|
||||
}
|
||||
@@ -50,6 +57,8 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
tokenLifetime string
|
||||
name string
|
||||
user string
|
||||
scopes []string
|
||||
allowList []codersdk.APIAllowListTarget
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "create",
|
||||
@@ -88,10 +97,18 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
res, err := client.CreateToken(inv.Context(), userID, codersdk.CreateTokenRequest{
|
||||
req := codersdk.CreateTokenRequest{
|
||||
Lifetime: parsedLifetime,
|
||||
TokenName: name,
|
||||
})
|
||||
}
|
||||
if len(req.Scopes) == 0 {
|
||||
req.Scopes = slice.StringEnums[codersdk.APIKeyScope](scopes)
|
||||
}
|
||||
if len(allowList) > 0 {
|
||||
req.AllowList = append([]codersdk.APIAllowListTarget(nil), allowList...)
|
||||
}
|
||||
|
||||
res, err := client.CreateToken(inv.Context(), userID, req)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tokens: %w", err)
|
||||
}
|
||||
@@ -106,7 +123,7 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
{
|
||||
Flag: "lifetime",
|
||||
Env: "CODER_TOKEN_LIFETIME",
|
||||
Description: "Specify a duration for the lifetime of the token.",
|
||||
Description: "Duration for the token lifetime. Supports standard Go duration units (ns, us, ms, s, m, h) plus d (days) and y (years). Examples: 8h, 30d, 1y, 1d12h30m.",
|
||||
Value: serpent.StringOf(&tokenLifetime),
|
||||
},
|
||||
{
|
||||
@@ -123,6 +140,16 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
Description: "Specify the user to create the token for (Only works if logged in user is admin).",
|
||||
Value: serpent.StringOf(&user),
|
||||
},
|
||||
{
|
||||
Flag: "scope",
|
||||
Description: "Repeatable scope to attach to the token (e.g. workspace:read).",
|
||||
Value: serpent.StringArrayOf(&scopes),
|
||||
},
|
||||
{
|
||||
Flag: "allow",
|
||||
Description: "Repeatable allow-list entry (<type>:<uuid>, e.g. workspace:1234-...).",
|
||||
Value: AllowListFlagOf(&allowList),
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
@@ -136,6 +163,8 @@ type tokenListRow struct {
|
||||
// For table format:
|
||||
ID string `json:"-" table:"id,default_sort"`
|
||||
TokenName string `json:"token_name" table:"name"`
|
||||
Scopes string `json:"-" table:"scopes"`
|
||||
Allow string `json:"-" table:"allow list"`
|
||||
LastUsed time.Time `json:"-" table:"last used"`
|
||||
ExpiresAt time.Time `json:"-" table:"expires at"`
|
||||
CreatedAt time.Time `json:"-" table:"created at"`
|
||||
@@ -143,20 +172,47 @@ type tokenListRow struct {
|
||||
}
|
||||
|
||||
func tokenListRowFromToken(token codersdk.APIKeyWithOwner) tokenListRow {
|
||||
return tokenListRowFromKey(token.APIKey, token.Username)
|
||||
}
|
||||
|
||||
func tokenListRowFromKey(token codersdk.APIKey, owner string) tokenListRow {
|
||||
return tokenListRow{
|
||||
APIKey: token.APIKey,
|
||||
APIKey: token,
|
||||
ID: token.ID,
|
||||
TokenName: token.TokenName,
|
||||
Scopes: joinScopes(token.Scopes),
|
||||
Allow: joinAllowList(token.AllowList),
|
||||
LastUsed: token.LastUsed,
|
||||
ExpiresAt: token.ExpiresAt,
|
||||
CreatedAt: token.CreatedAt,
|
||||
Owner: token.Username,
|
||||
Owner: owner,
|
||||
}
|
||||
}
|
||||
|
||||
func joinScopes(scopes []codersdk.APIKeyScope) string {
|
||||
if len(scopes) == 0 {
|
||||
return ""
|
||||
}
|
||||
vals := slice.ToStrings(scopes)
|
||||
sort.Strings(vals)
|
||||
return strings.Join(vals, ", ")
|
||||
}
|
||||
|
||||
func joinAllowList(entries []codersdk.APIAllowListTarget) string {
|
||||
if len(entries) == 0 {
|
||||
return ""
|
||||
}
|
||||
vals := make([]string, len(entries))
|
||||
for i, entry := range entries {
|
||||
vals[i] = entry.String()
|
||||
}
|
||||
sort.Strings(vals)
|
||||
return strings.Join(vals, ", ")
|
||||
}
|
||||
|
||||
func (r *RootCmd) listTokens() *serpent.Command {
|
||||
// we only display the 'owner' column if the --all argument is passed in
|
||||
defaultCols := []string{"id", "name", "last used", "expires at", "created at"}
|
||||
defaultCols := []string{"id", "name", "scopes", "allow list", "last used", "expires at", "created at"}
|
||||
if slices.Contains(os.Args, "-a") || slices.Contains(os.Args, "--all") {
|
||||
defaultCols = append(defaultCols, "owner")
|
||||
}
|
||||
@@ -226,6 +282,48 @@ func (r *RootCmd) listTokens() *serpent.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) viewToken() *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]tokenListRow{}, []string{"id", "name", "scopes", "allow list", "last used", "expires at", "created at", "owner"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "view <name|id>",
|
||||
Short: "Display detailed information about a token",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tokenName := inv.Args[0]
|
||||
token, err := client.APIKeyByName(inv.Context(), codersdk.Me, tokenName)
|
||||
if err != nil {
|
||||
maybeID := strings.Split(tokenName, "-")[0]
|
||||
token, err = client.APIKeyByID(inv.Context(), codersdk.Me, maybeID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch api key by name or id: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
row := tokenListRowFromKey(*token, "")
|
||||
out, err := formatter.Format(inv.Context(), []tokenListRow{row})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(inv.Stdout, out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) removeToken() *serpent.Command {
|
||||
cmd := &serpent.Command{
|
||||
Use: "remove <name|id|token>",
|
||||
|
||||
+56
-3
@@ -4,10 +4,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -46,6 +49,18 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
id := res[:10]
|
||||
|
||||
allowWorkspaceID := uuid.New()
|
||||
allowSpec := fmt.Sprintf("workspace:%s", allowWorkspaceID.String())
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "scoped-token", "--scope", string(codersdk.APIKeyScopeWorkspaceRead), "--allow", allowSpec)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
scopedTokenID := res[:10]
|
||||
|
||||
// Test creating a token for second user from first user's (admin) session
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "token-two", "--user", secondUser.ID.String())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -67,7 +82,7 @@ func TestTokens(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
// Result should only contain the token created for the admin user
|
||||
// Result should only contain the tokens created for the admin user
|
||||
require.Contains(t, res, "ID")
|
||||
require.Contains(t, res, "EXPIRES AT")
|
||||
require.Contains(t, res, "CREATED AT")
|
||||
@@ -76,6 +91,16 @@ func TestTokens(t *testing.T) {
|
||||
// Result should not contain the token created for the second user
|
||||
require.NotContains(t, res, secondTokenID)
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "view", "scoped-token")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.Contains(t, res, string(codersdk.APIKeyScopeWorkspaceRead))
|
||||
require.Contains(t, res, allowSpec)
|
||||
|
||||
// Test listing tokens from the second user's session
|
||||
inv, root = clitest.New(t, "tokens", "ls")
|
||||
clitest.SetupConfig(t, secondUserClient, root)
|
||||
@@ -101,6 +126,14 @@ func TestTokens(t *testing.T) {
|
||||
// User (non-admin) should not be able to create a token for another user
|
||||
require.Error(t, err)
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "invalid-allow", "--allow", "badvalue")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "invalid allow_list entry")
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "ls", "--output=json")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
@@ -110,8 +143,17 @@ func TestTokens(t *testing.T) {
|
||||
|
||||
var tokens []codersdk.APIKey
|
||||
require.NoError(t, json.Unmarshal(buf.Bytes(), &tokens))
|
||||
require.Len(t, tokens, 1)
|
||||
require.Equal(t, id, tokens[0].ID)
|
||||
require.Len(t, tokens, 2)
|
||||
tokenByName := make(map[string]codersdk.APIKey, len(tokens))
|
||||
for _, tk := range tokens {
|
||||
tokenByName[tk.TokenName] = tk
|
||||
}
|
||||
require.Contains(t, tokenByName, "token-one")
|
||||
require.Contains(t, tokenByName, "scoped-token")
|
||||
scopedToken := tokenByName["scoped-token"]
|
||||
require.Contains(t, scopedToken.Scopes, codersdk.APIKeyScopeWorkspaceRead)
|
||||
require.Len(t, scopedToken.AllowList, 1)
|
||||
require.Equal(t, allowSpec, scopedToken.AllowList[0].String())
|
||||
|
||||
// Delete by name
|
||||
inv, root = clitest.New(t, "tokens", "rm", "token-one")
|
||||
@@ -135,6 +177,17 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
|
||||
// Delete scoped token by ID
|
||||
inv, root = clitest.New(t, "tokens", "rm", scopedTokenID)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
|
||||
// Create third token
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "token-three")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
@@ -239,6 +239,10 @@ func (a *API) Serve(ctx context.Context, l net.Listener) error {
|
||||
return xerrors.Errorf("create agent API server: %w", err)
|
||||
}
|
||||
|
||||
if err := a.ResourcesMonitoringAPI.InitMonitors(ctx); err != nil {
|
||||
return xerrors.Errorf("initialize resource monitoring: %w", err)
|
||||
}
|
||||
|
||||
return server.Serve(ctx, l)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@@ -33,42 +34,60 @@ type ResourcesMonitoringAPI struct {
|
||||
|
||||
Debounce time.Duration
|
||||
Config resourcesmonitor.Config
|
||||
|
||||
// Cache resource monitors on first call to avoid millions of DB queries per day.
|
||||
memoryMonitor database.WorkspaceAgentMemoryResourceMonitor
|
||||
volumeMonitors []database.WorkspaceAgentVolumeResourceMonitor
|
||||
monitorsLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context.Context, _ *proto.GetResourcesMonitoringConfigurationRequest) (*proto.GetResourcesMonitoringConfigurationResponse, error) {
|
||||
memoryMonitor, memoryErr := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if memoryErr != nil && !errors.Is(memoryErr, sql.ErrNoRows) {
|
||||
return nil, xerrors.Errorf("failed to fetch memory resource monitor: %w", memoryErr)
|
||||
// InitMonitors fetches resource monitors from the database and caches them.
|
||||
// This must be called once after creating a ResourcesMonitoringAPI, the context should be
|
||||
// the agent per-RPC connection context. If fetching fails with a real error (not sql.ErrNoRows), the
|
||||
// connection should be torn down.
|
||||
func (a *ResourcesMonitoringAPI) InitMonitors(ctx context.Context) error {
|
||||
memMon, err := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return xerrors.Errorf("fetch memory resource monitor: %w", err)
|
||||
}
|
||||
// If sql.ErrNoRows, memoryMonitor stays as zero value (CreatedAt.IsZero() = true).
|
||||
// Otherwise, store the fetched monitor.
|
||||
if err == nil {
|
||||
a.memoryMonitor = memMon
|
||||
}
|
||||
|
||||
volumeMonitors, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
volMons, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to fetch volume resource monitors: %w", err)
|
||||
return xerrors.Errorf("fetch volume resource monitors: %w", err)
|
||||
}
|
||||
// 0 length is valid, indicating none configured, since the volume monitors in the DB can be many.
|
||||
a.volumeMonitors = volMons
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(_ context.Context, _ *proto.GetResourcesMonitoringConfigurationRequest) (*proto.GetResourcesMonitoringConfigurationResponse, error) {
|
||||
return &proto.GetResourcesMonitoringConfigurationResponse{
|
||||
Config: &proto.GetResourcesMonitoringConfigurationResponse_Config{
|
||||
CollectionIntervalSeconds: int32(a.Config.CollectionInterval.Seconds()),
|
||||
NumDatapoints: a.Config.NumDatapoints,
|
||||
},
|
||||
Memory: func() *proto.GetResourcesMonitoringConfigurationResponse_Memory {
|
||||
if memoryErr != nil {
|
||||
if a.memoryMonitor.CreatedAt.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &proto.GetResourcesMonitoringConfigurationResponse_Memory{
|
||||
Enabled: memoryMonitor.Enabled,
|
||||
Enabled: a.memoryMonitor.Enabled,
|
||||
}
|
||||
}(),
|
||||
Volumes: func() []*proto.GetResourcesMonitoringConfigurationResponse_Volume {
|
||||
volumes := make([]*proto.GetResourcesMonitoringConfigurationResponse_Volume, 0, len(volumeMonitors))
|
||||
for _, monitor := range volumeMonitors {
|
||||
volumes := make([]*proto.GetResourcesMonitoringConfigurationResponse_Volume, 0, len(a.volumeMonitors))
|
||||
for _, monitor := range a.volumeMonitors {
|
||||
volumes = append(volumes, &proto.GetResourcesMonitoringConfigurationResponse_Volume{
|
||||
Enabled: monitor.Enabled,
|
||||
Path: monitor.Path,
|
||||
})
|
||||
}
|
||||
|
||||
return volumes
|
||||
}(),
|
||||
}, nil
|
||||
@@ -77,6 +96,10 @@ func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context
|
||||
func (a *ResourcesMonitoringAPI) PushResourcesMonitoringUsage(ctx context.Context, req *proto.PushResourcesMonitoringUsageRequest) (*proto.PushResourcesMonitoringUsageResponse, error) {
|
||||
var err error
|
||||
|
||||
// Lock for the entire push operation since calls are sequential from the agent
|
||||
a.monitorsLock.Lock()
|
||||
defer a.monitorsLock.Unlock()
|
||||
|
||||
if memoryErr := a.monitorMemory(ctx, req.Datapoints); memoryErr != nil {
|
||||
err = errors.Join(err, xerrors.Errorf("monitor memory: %w", memoryErr))
|
||||
}
|
||||
@@ -89,18 +112,7 @@ func (a *ResourcesMonitoringAPI) PushResourcesMonitoringUsage(ctx context.Contex
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
|
||||
monitor, err := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
// It is valid for an agent to not have a memory monitor, so we
|
||||
// do not want to treat it as an error.
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return xerrors.Errorf("fetch memory resource monitor: %w", err)
|
||||
}
|
||||
|
||||
if !monitor.Enabled {
|
||||
if !a.memoryMonitor.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -109,15 +121,15 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
usageDatapoints = append(usageDatapoints, datapoint.Memory)
|
||||
}
|
||||
|
||||
usageStates := resourcesmonitor.CalculateMemoryUsageStates(monitor, usageDatapoints)
|
||||
usageStates := resourcesmonitor.CalculateMemoryUsageStates(a.memoryMonitor, usageDatapoints)
|
||||
|
||||
oldState := monitor.State
|
||||
oldState := a.memoryMonitor.State
|
||||
newState := resourcesmonitor.NextState(a.Config, oldState, usageStates)
|
||||
|
||||
debouncedUntil, shouldNotify := monitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)
|
||||
debouncedUntil, shouldNotify := a.memoryMonitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)
|
||||
|
||||
//nolint:gocritic // We need to be able to update the resource monitor here.
|
||||
err = a.Database.UpdateMemoryResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateMemoryResourceMonitorParams{
|
||||
err := a.Database.UpdateMemoryResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateMemoryResourceMonitorParams{
|
||||
AgentID: a.AgentID,
|
||||
State: newState,
|
||||
UpdatedAt: dbtime.Time(a.Clock.Now()),
|
||||
@@ -127,6 +139,11 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
return xerrors.Errorf("update workspace monitor: %w", err)
|
||||
}
|
||||
|
||||
// Update cached state
|
||||
a.memoryMonitor.State = newState
|
||||
a.memoryMonitor.DebouncedUntil = dbtime.Time(debouncedUntil)
|
||||
a.memoryMonitor.UpdatedAt = dbtime.Time(a.Clock.Now())
|
||||
|
||||
if !shouldNotify {
|
||||
return nil
|
||||
}
|
||||
@@ -143,7 +160,7 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
notifications.TemplateWorkspaceOutOfMemory,
|
||||
map[string]string{
|
||||
"workspace": workspace.Name,
|
||||
"threshold": fmt.Sprintf("%d%%", monitor.Threshold),
|
||||
"threshold": fmt.Sprintf("%d%%", a.memoryMonitor.Threshold),
|
||||
},
|
||||
map[string]any{
|
||||
// NOTE(DanielleMaywood):
|
||||
@@ -169,14 +186,9 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) monitorVolumes(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
|
||||
volumeMonitors, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get or insert volume monitor: %w", err)
|
||||
}
|
||||
|
||||
outOfDiskVolumes := make([]map[string]any, 0)
|
||||
|
||||
for _, monitor := range volumeMonitors {
|
||||
for i, monitor := range a.volumeMonitors {
|
||||
if !monitor.Enabled {
|
||||
continue
|
||||
}
|
||||
@@ -219,6 +231,11 @@ func (a *ResourcesMonitoringAPI) monitorVolumes(ctx context.Context, datapoints
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("update workspace monitor: %w", err)
|
||||
}
|
||||
|
||||
// Update cached state
|
||||
a.volumeMonitors[i].State = newState
|
||||
a.volumeMonitors[i].DebouncedUntil = dbtime.Time(debouncedUntil)
|
||||
a.volumeMonitors[i].UpdatedAt = dbtime.Time(a.Clock.Now())
|
||||
}
|
||||
|
||||
if len(outOfDiskVolumes) == 0 {
|
||||
|
||||
@@ -101,6 +101,9 @@ func TestMemoryResourceMonitorDebounce(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: The monitor is given a state that will trigger NOK
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -304,6 +307,9 @@ func TestMemoryResourceMonitor(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
clock.Set(collectedAt)
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: datapoints,
|
||||
@@ -337,6 +343,8 @@ func TestMemoryResourceMonitorMissingData(t *testing.T) {
|
||||
State: database.WorkspaceAgentMonitorStateOK,
|
||||
Threshold: 80,
|
||||
})
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two NOK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
@@ -387,6 +395,9 @@ func TestMemoryResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two OK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -466,6 +477,9 @@ func TestVolumeResourceMonitorDebounce(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When:
|
||||
// - First monitor is in a NOK state
|
||||
// - Second monitor is in an OK state
|
||||
@@ -742,6 +756,9 @@ func TestVolumeResourceMonitor(t *testing.T) {
|
||||
Threshold: tt.thresholdPercent,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
clock.Set(collectedAt)
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: datapoints,
|
||||
@@ -780,6 +797,9 @@ func TestVolumeResourceMonitorMultiple(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: both of them move to a NOK state
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -832,6 +852,9 @@ func TestVolumeResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two NOK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -891,6 +914,9 @@ func TestVolumeResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two OK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
|
||||
+325
-430
File diff suppressed because it is too large
Load Diff
+593
-414
File diff suppressed because it is too large
Load Diff
Generated
+215
-70
@@ -85,7 +85,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/aibridge/interceptions": {
|
||||
"/aibridge/interceptions": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -151,39 +151,16 @@ const docTemplate = `{
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Search query for filtering tasks",
|
||||
"description": "Search query for filtering tasks. Supports: owner:\u003cusername/uuid/me\u003e, organization:\u003corg-name/uuid\u003e, status:\u003cstatus\u003e",
|
||||
"name": "q",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Return tasks after this ID for pagination",
|
||||
"name": "after_id",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"maximum": 100,
|
||||
"minimum": 1,
|
||||
"type": "integer",
|
||||
"default": 25,
|
||||
"description": "Maximum number of tasks to return",
|
||||
"name": "limit",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"minimum": 0,
|
||||
"type": "integer",
|
||||
"default": 0,
|
||||
"description": "Offset for pagination",
|
||||
"name": "offset",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/coderd.tasksListResponse"
|
||||
"$ref": "#/definitions/codersdk.TasksListResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -229,7 +206,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}": {
|
||||
"/api/experimental/tasks/{user}/{task}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -253,7 +230,7 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -290,7 +267,7 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -302,7 +279,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}/logs": {
|
||||
"/api/experimental/tasks/{user}/{task}/logs": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -326,7 +303,7 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -341,7 +318,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}/send": {
|
||||
"/api/experimental/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@@ -365,7 +342,7 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
@@ -3082,6 +3059,45 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/oauth2/revoke": {
|
||||
"post": {
|
||||
"consumes": [
|
||||
"application/x-www-form-urlencoded"
|
||||
],
|
||||
"tags": [
|
||||
"Enterprise"
|
||||
],
|
||||
"summary": "Revoke OAuth2 tokens (RFC 7009).",
|
||||
"operationId": "oauth2-token-revocation",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Client ID for authentication",
|
||||
"name": "client_id",
|
||||
"in": "formData",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The token to revoke",
|
||||
"name": "token",
|
||||
"in": "formData",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Hint about token type (access_token or refresh_token)",
|
||||
"name": "token_type_hint",
|
||||
"in": "formData"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Token successfully revoked"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/oauth2/tokens": {
|
||||
"post": {
|
||||
"produces": [
|
||||
@@ -11624,20 +11640,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"coderd.tasksListResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"tasks": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Task"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ACLAvailable": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -11666,12 +11668,35 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeBedrockConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"access_key": {
|
||||
"type": "string"
|
||||
},
|
||||
"access_key_secret": {
|
||||
"type": "string"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"type": "string"
|
||||
},
|
||||
"small_fast_model": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"anthropic": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeAnthropicConfig"
|
||||
},
|
||||
"bedrock": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeBedrockConfig"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -11683,13 +11708,16 @@ const docTemplate = `{
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"initiator_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
"initiator": {
|
||||
"$ref": "#/definitions/codersdk.MinimalUser"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
@@ -11728,14 +11756,14 @@ const docTemplate = `{
|
||||
"codersdk.AIBridgeListInterceptionsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"results": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeInterception"
|
||||
}
|
||||
},
|
||||
"total": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11879,6 +11907,12 @@ const docTemplate = `{
|
||||
"user_id"
|
||||
],
|
||||
"properties": {
|
||||
"allow_list": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.APIAllowListTarget"
|
||||
}
|
||||
},
|
||||
"created_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -12489,6 +12523,13 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -13712,6 +13753,13 @@ const docTemplate = `{
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
@@ -13977,6 +14025,9 @@ const docTemplate = `{
|
||||
"docs_url": {
|
||||
"$ref": "#/definitions/serpent.URL"
|
||||
},
|
||||
"enable_authz_recording": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"enable_terraform_debug_mode": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -14265,11 +14316,9 @@ const docTemplate = `{
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing",
|
||||
"aibridge"
|
||||
"workspace-sharing"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAIBridge": "Enables AI Bridge functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
@@ -14287,8 +14336,7 @@ const docTemplate = `{
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing",
|
||||
"ExperimentAIBridge"
|
||||
"ExperimentWorkspaceSharing"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -14895,7 +14943,15 @@ const docTemplate = `{
|
||||
"enum": [
|
||||
"bug",
|
||||
"chat",
|
||||
"docs"
|
||||
"docs",
|
||||
"star"
|
||||
]
|
||||
},
|
||||
"location": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"navbar",
|
||||
"dropdown"
|
||||
]
|
||||
},
|
||||
"name": {
|
||||
@@ -15068,6 +15124,9 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -15340,6 +15399,9 @@ const docTemplate = `{
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
},
|
||||
"token_revoke": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -15373,6 +15435,9 @@ const docTemplate = `{
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -15439,7 +15504,10 @@ const docTemplate = `{
|
||||
}
|
||||
},
|
||||
"registration_access_token": {
|
||||
"type": "string"
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"registration_client_uri": {
|
||||
"type": "string"
|
||||
@@ -17459,6 +17527,13 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -17703,6 +17778,9 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"owner_avatar_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"owner_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -17713,19 +17791,15 @@ const docTemplate = `{
|
||||
"status": {
|
||||
"enum": [
|
||||
"pending",
|
||||
"starting",
|
||||
"running",
|
||||
"stopping",
|
||||
"stopped",
|
||||
"failed",
|
||||
"canceling",
|
||||
"canceled",
|
||||
"deleting",
|
||||
"deleted"
|
||||
"initializing",
|
||||
"active",
|
||||
"paused",
|
||||
"unknown",
|
||||
"error"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.WorkspaceStatus"
|
||||
"$ref": "#/definitions/codersdk.TaskStatus"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -17742,6 +17816,10 @@ const docTemplate = `{
|
||||
"template_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"template_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"updated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -17778,6 +17856,28 @@ const docTemplate = `{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"workspace_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"workspace_status": {
|
||||
"enum": [
|
||||
"pending",
|
||||
"starting",
|
||||
"running",
|
||||
"stopping",
|
||||
"stopped",
|
||||
"failed",
|
||||
"canceling",
|
||||
"canceled",
|
||||
"deleting",
|
||||
"deleted"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.WorkspaceStatus"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -17862,6 +17962,39 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.TaskStatus": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"pending",
|
||||
"initializing",
|
||||
"active",
|
||||
"paused",
|
||||
"unknown",
|
||||
"error"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"TaskStatusPending",
|
||||
"TaskStatusInitializing",
|
||||
"TaskStatusActive",
|
||||
"TaskStatusPaused",
|
||||
"TaskStatusUnknown",
|
||||
"TaskStatusError"
|
||||
]
|
||||
},
|
||||
"codersdk.TasksListResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"tasks": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Task"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.TelemetryConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -19582,6 +19715,14 @@ const docTemplate = `{
|
||||
"description": "OwnerName is the username of the owner of the workspace.",
|
||||
"type": "string"
|
||||
},
|
||||
"task_id": {
|
||||
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"template_active_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -20389,6 +20530,7 @@ const docTemplate = `{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with ` + "`" + `Task.WorkspaceAppID` + "`" + `",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
@@ -20892,6 +21034,9 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"role": {
|
||||
"enum": [
|
||||
"admin",
|
||||
|
||||
Generated
+207
-70
@@ -65,7 +65,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/aibridge/interceptions": {
|
||||
"/aibridge/interceptions": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -125,39 +125,16 @@
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Search query for filtering tasks",
|
||||
"description": "Search query for filtering tasks. Supports: owner:\u003cusername/uuid/me\u003e, organization:\u003corg-name/uuid\u003e, status:\u003cstatus\u003e",
|
||||
"name": "q",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Return tasks after this ID for pagination",
|
||||
"name": "after_id",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"maximum": 100,
|
||||
"minimum": 1,
|
||||
"type": "integer",
|
||||
"default": 25,
|
||||
"description": "Maximum number of tasks to return",
|
||||
"name": "limit",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"minimum": 0,
|
||||
"type": "integer",
|
||||
"default": 0,
|
||||
"description": "Offset for pagination",
|
||||
"name": "offset",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/coderd.tasksListResponse"
|
||||
"$ref": "#/definitions/codersdk.TasksListResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -201,7 +178,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}": {
|
||||
"/api/experimental/tasks/{user}/{task}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -223,7 +200,7 @@
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -258,7 +235,7 @@
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -270,7 +247,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}/logs": {
|
||||
"/api/experimental/tasks/{user}/{task}/logs": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -292,7 +269,7 @@
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -307,7 +284,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/tasks/{user}/{id}/send": {
|
||||
"/api/experimental/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@@ -329,7 +306,7 @@
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "id",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
@@ -2720,6 +2697,41 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/oauth2/revoke": {
|
||||
"post": {
|
||||
"consumes": ["application/x-www-form-urlencoded"],
|
||||
"tags": ["Enterprise"],
|
||||
"summary": "Revoke OAuth2 tokens (RFC 7009).",
|
||||
"operationId": "oauth2-token-revocation",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Client ID for authentication",
|
||||
"name": "client_id",
|
||||
"in": "formData",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The token to revoke",
|
||||
"name": "token",
|
||||
"in": "formData",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Hint about token type (access_token or refresh_token)",
|
||||
"name": "token_type_hint",
|
||||
"in": "formData"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Token successfully revoked"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/oauth2/tokens": {
|
||||
"post": {
|
||||
"produces": ["application/json"],
|
||||
@@ -10324,20 +10336,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"coderd.tasksListResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"tasks": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Task"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ACLAvailable": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -10366,12 +10364,35 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeBedrockConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"access_key": {
|
||||
"type": "string"
|
||||
},
|
||||
"access_key_secret": {
|
||||
"type": "string"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"type": "string"
|
||||
},
|
||||
"small_fast_model": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"anthropic": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeAnthropicConfig"
|
||||
},
|
||||
"bedrock": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeBedrockConfig"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -10383,13 +10404,16 @@
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"initiator_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
"initiator": {
|
||||
"$ref": "#/definitions/codersdk.MinimalUser"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
@@ -10428,14 +10452,14 @@
|
||||
"codersdk.AIBridgeListInterceptionsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"results": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeInterception"
|
||||
}
|
||||
},
|
||||
"total": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -10579,6 +10603,12 @@
|
||||
"user_id"
|
||||
],
|
||||
"properties": {
|
||||
"allow_list": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.APIAllowListTarget"
|
||||
}
|
||||
},
|
||||
"created_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -11175,6 +11205,13 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -12330,6 +12367,13 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
@@ -12595,6 +12639,9 @@
|
||||
"docs_url": {
|
||||
"$ref": "#/definitions/serpent.URL"
|
||||
},
|
||||
"enable_authz_recording": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"enable_terraform_debug_mode": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -12876,11 +12923,9 @@
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing",
|
||||
"aibridge"
|
||||
"workspace-sharing"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAIBridge": "Enables AI Bridge functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
@@ -12898,8 +12943,7 @@
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing",
|
||||
"ExperimentAIBridge"
|
||||
"ExperimentWorkspaceSharing"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -13487,7 +13531,11 @@
|
||||
"properties": {
|
||||
"icon": {
|
||||
"type": "string",
|
||||
"enum": ["bug", "chat", "docs"]
|
||||
"enum": ["bug", "chat", "docs", "star"]
|
||||
},
|
||||
"location": {
|
||||
"type": "string",
|
||||
"enum": ["navbar", "dropdown"]
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
@@ -13630,6 +13678,9 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -13902,6 +13953,9 @@
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
},
|
||||
"token_revoke": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -13935,6 +13989,9 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -14001,7 +14058,10 @@
|
||||
}
|
||||
},
|
||||
"registration_access_token": {
|
||||
"type": "string"
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"registration_client_uri": {
|
||||
"type": "string"
|
||||
@@ -15959,6 +16019,13 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -16199,6 +16266,9 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"owner_avatar_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"owner_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -16209,19 +16279,15 @@
|
||||
"status": {
|
||||
"enum": [
|
||||
"pending",
|
||||
"starting",
|
||||
"running",
|
||||
"stopping",
|
||||
"stopped",
|
||||
"failed",
|
||||
"canceling",
|
||||
"canceled",
|
||||
"deleting",
|
||||
"deleted"
|
||||
"initializing",
|
||||
"active",
|
||||
"paused",
|
||||
"unknown",
|
||||
"error"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.WorkspaceStatus"
|
||||
"$ref": "#/definitions/codersdk.TaskStatus"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -16238,6 +16304,10 @@
|
||||
"template_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"template_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"updated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -16274,6 +16344,28 @@
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"workspace_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"workspace_status": {
|
||||
"enum": [
|
||||
"pending",
|
||||
"starting",
|
||||
"running",
|
||||
"stopping",
|
||||
"stopped",
|
||||
"failed",
|
||||
"canceling",
|
||||
"canceled",
|
||||
"deleting",
|
||||
"deleted"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.WorkspaceStatus"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -16347,6 +16439,39 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.TaskStatus": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"pending",
|
||||
"initializing",
|
||||
"active",
|
||||
"paused",
|
||||
"unknown",
|
||||
"error"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"TaskStatusPending",
|
||||
"TaskStatusInitializing",
|
||||
"TaskStatusActive",
|
||||
"TaskStatusPaused",
|
||||
"TaskStatusUnknown",
|
||||
"TaskStatusError"
|
||||
]
|
||||
},
|
||||
"codersdk.TasksListResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"tasks": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Task"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.TelemetryConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -17976,6 +18101,14 @@
|
||||
"description": "OwnerName is the username of the owner of the workspace.",
|
||||
"type": "string"
|
||||
},
|
||||
"task_id": {
|
||||
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"template_active_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -18731,6 +18864,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with `Task.WorkspaceAppID`",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
@@ -19208,6 +19342,9 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"role": {
|
||||
"enum": ["admin", "use"],
|
||||
"allOf": [
|
||||
|
||||
+28
-15
@@ -2,6 +2,7 @@ package apikey
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
@@ -44,12 +45,17 @@ type CreateParams struct {
|
||||
// database representation. It is the responsibility of the caller to insert it
|
||||
// into the database.
|
||||
func Generate(params CreateParams) (database.InsertAPIKeyParams, string, error) {
|
||||
keyID, keySecret, err := generateKey()
|
||||
// Length of an API Key ID.
|
||||
keyID, err := cryptorand.String(10)
|
||||
if err != nil {
|
||||
return database.InsertAPIKeyParams{}, "", xerrors.Errorf("generate API key: %w", err)
|
||||
return database.InsertAPIKeyParams{}, "", xerrors.Errorf("generate API key ID: %w", err)
|
||||
}
|
||||
|
||||
hashed := sha256.Sum256([]byte(keySecret))
|
||||
// Length of an API Key secret.
|
||||
keySecret, hashedSecret, err := GenerateSecret(22)
|
||||
if err != nil {
|
||||
return database.InsertAPIKeyParams{}, "", xerrors.Errorf("generate API key secret: %w", err)
|
||||
}
|
||||
|
||||
// Default expires at to now+lifetime, or use the configured value if not
|
||||
// set.
|
||||
@@ -120,7 +126,7 @@ func Generate(params CreateParams) (database.InsertAPIKeyParams, string, error)
|
||||
ExpiresAt: params.ExpiresAt.UTC(),
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
HashedSecret: hashed[:],
|
||||
HashedSecret: hashedSecret,
|
||||
LoginType: params.LoginType,
|
||||
Scopes: scopes,
|
||||
AllowList: params.AllowList,
|
||||
@@ -128,17 +134,24 @@ func Generate(params CreateParams) (database.InsertAPIKeyParams, string, error)
|
||||
}, token, nil
|
||||
}
|
||||
|
||||
// generateKey a new ID and secret for an API key.
|
||||
func generateKey() (id string, secret string, err error) {
|
||||
// Length of an API Key ID.
|
||||
id, err = cryptorand.String(10)
|
||||
func GenerateSecret(length int) (secret string, hashed []byte, err error) {
|
||||
secret, err = cryptorand.String(length)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", nil, err
|
||||
}
|
||||
// Length of an API Key secret.
|
||||
secret, err = cryptorand.String(22)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return id, secret, nil
|
||||
hash := HashSecret(secret)
|
||||
return secret, hash, nil
|
||||
}
|
||||
|
||||
// ValidateHash compares a secret against an expected hashed secret.
|
||||
func ValidateHash(hashedSecret []byte, secret string) bool {
|
||||
hash := HashSecret(secret)
|
||||
return subtle.ConstantTimeCompare(hashedSecret, hash) == 1
|
||||
}
|
||||
|
||||
// HashSecret is the single function used to hash API key secrets.
|
||||
// Use this to ensure a consistent hashing algorithm.
|
||||
func HashSecret(secret string) []byte {
|
||||
hash := sha256.Sum256([]byte(secret))
|
||||
return hash[:]
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package apikey_test
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -126,8 +125,8 @@ func TestGenerate(t *testing.T) {
|
||||
require.Equal(t, key.ID, keytokens[0])
|
||||
|
||||
// Assert that the hashed secret is correct.
|
||||
hashed := sha256.Sum256([]byte(keytokens[1]))
|
||||
assert.ElementsMatch(t, hashed, key.HashedSecret)
|
||||
equal := apikey.ValidateHash(key.HashedSecret, keytokens[1])
|
||||
require.True(t, equal, "valid secret")
|
||||
|
||||
assert.Equal(t, tc.params.UserID, key.UserID)
|
||||
assert.WithinDuration(t, dbtime.Now(), key.CreatedAt, time.Second*5)
|
||||
@@ -173,3 +172,17 @@ func TestGenerate(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestInvalid just ensures the false case is asserted by some tests.
|
||||
// Otherwise, a function that just `returns true` might pass all tests incorrectly.
|
||||
func TestInvalid(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Falsef(t, apikey.ValidateHash([]byte{}, "secret"), "empty hash")
|
||||
|
||||
secret, hash, err := apikey.GenerateSecret(10)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Falsef(t, apikey.ValidateHash(hash, secret+"_"), "different secret")
|
||||
require.Falsef(t, apikey.ValidateHash(hash[:len(hash)-1], secret), "different hash length")
|
||||
}
|
||||
|
||||
@@ -51,6 +51,8 @@ func TestTokenCRUD(t *testing.T) {
|
||||
require.Greater(t, keys[0].ExpiresAt, time.Now().Add(time.Hour*24*6))
|
||||
require.Less(t, keys[0].ExpiresAt, time.Now().Add(time.Hour*24*8))
|
||||
require.Equal(t, codersdk.APIKeyScopeAll, keys[0].Scope)
|
||||
require.Len(t, keys[0].AllowList, 1)
|
||||
require.Equal(t, "*:*", keys[0].AllowList[0].String())
|
||||
|
||||
// no update
|
||||
|
||||
@@ -86,6 +88,8 @@ func TestTokenScoped(t *testing.T) {
|
||||
require.EqualValues(t, len(keys), 1)
|
||||
require.Contains(t, res.Key, keys[0].ID)
|
||||
require.Equal(t, keys[0].Scope, codersdk.APIKeyScopeApplicationConnect)
|
||||
require.Len(t, keys[0].AllowList, 1)
|
||||
require.Equal(t, "*:*", keys[0].AllowList[0].String())
|
||||
}
|
||||
|
||||
// Ensure backward-compat: when a token is created using the legacy singular
|
||||
@@ -132,6 +136,8 @@ func TestTokenLegacySingularScopeCompat(t *testing.T) {
|
||||
require.Len(t, keys, 1)
|
||||
require.Equal(t, tc.scope, keys[0].Scope)
|
||||
require.ElementsMatch(t, keys[0].Scopes, tc.scopes)
|
||||
require.Len(t, keys[0].AllowList, 1)
|
||||
require.Equal(t, "*:*", keys[0].AllowList[0].String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
+2
-2
@@ -509,11 +509,11 @@ func (api *API) auditLogResourceLink(ctx context.Context, alog database.GetAudit
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
|
||||
user, err := api.Database.GetUserByID(ctx, task.OwnerID)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("/tasks/%s/%s", workspace.OwnerName, task.Name)
|
||||
return fmt.Sprintf("/tasks/%s/%s", user.Username, task.ID)
|
||||
|
||||
default:
|
||||
return ""
|
||||
|
||||
+11
-10
@@ -50,6 +50,13 @@ func TestCheckPermissions(t *testing.T) {
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readOrgWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OrganizationID: adminUser.OrganizationID.String(),
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readMyself: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceUser,
|
||||
@@ -58,16 +65,10 @@ func TestCheckPermissions(t *testing.T) {
|
||||
Action: "read",
|
||||
},
|
||||
readOwnWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OwnerID: "me",
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readOrgWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OrganizationID: adminUser.OrganizationID.String(),
|
||||
OwnerID: "me",
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
@@ -92,9 +93,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: adminUser.UserID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: true,
|
||||
readOrgWorkspaces: true,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: true,
|
||||
updateSpecificTemplate: true,
|
||||
},
|
||||
},
|
||||
@@ -104,9 +105,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: orgAdminUser.ID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: true,
|
||||
readOrgWorkspaces: true,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: true,
|
||||
updateSpecificTemplate: true,
|
||||
},
|
||||
},
|
||||
@@ -116,9 +117,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: memberUser.ID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: false,
|
||||
readOrgWorkspaces: false,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: false,
|
||||
updateSpecificTemplate: false,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -776,10 +776,6 @@ func TestExecutorWorkspaceAutostopNoWaitChangedMyMind(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExecutorAutostartMultipleOK(t *testing.T) {
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip(`This test only really works when using a "real" database, similar to a HA setup`)
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
@@ -1259,10 +1255,6 @@ func TestNotifications(t *testing.T) {
|
||||
func TestExecutorPrebuilds(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
// Prebuild workspaces should not be autostopped when the deadline is reached.
|
||||
// After being claimed, the workspace should stop at the deadline.
|
||||
t.Run("OnlyStopsAfterClaimed", func(t *testing.T) {
|
||||
@@ -1772,3 +1764,175 @@ func TestExecutorAutostartSkipsWhenNoProvisionersAvailable(t *testing.T) {
|
||||
|
||||
assert.Len(t, stats.Transitions, 1, "should create builds when provisioners are available")
|
||||
}
|
||||
|
||||
func TestExecutorTaskWorkspace(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
createTaskTemplate := func(t *testing.T, client *codersdk.Client, orgID uuid.UUID, ctx context.Context, defaultTTL time.Duration) codersdk.Template {
|
||||
t.Helper()
|
||||
|
||||
taskAppID := uuid.New()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, orgID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{HasAiTasks: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
ProvisionApply: []*proto.Response{
|
||||
{
|
||||
Type: &proto.Response_Apply{
|
||||
Apply: &proto.ApplyComplete{
|
||||
Resources: []*proto.Resource{
|
||||
{
|
||||
Agents: []*proto.Agent{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
Name: "dev",
|
||||
Auth: &proto.Agent_Token{
|
||||
Token: uuid.NewString(),
|
||||
},
|
||||
Apps: []*proto.App{
|
||||
{
|
||||
Id: taskAppID.String(),
|
||||
Slug: "task-app",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
AiTasks: []*proto.AITask{
|
||||
{
|
||||
AppId: taskAppID.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, orgID, version.ID)
|
||||
|
||||
if defaultTTL > 0 {
|
||||
_, err := client.UpdateTemplateMeta(ctx, template.ID, codersdk.UpdateTemplateMeta{
|
||||
DefaultTTLMillis: defaultTTL.Milliseconds(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return template
|
||||
}
|
||||
|
||||
createTaskWorkspace := func(t *testing.T, client *codersdk.Client, template codersdk.Template, ctx context.Context, input string) codersdk.Workspace {
|
||||
t.Helper()
|
||||
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: input,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace")
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
return workspace
|
||||
}
|
||||
|
||||
t.Run("Autostart", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
sched = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
)
|
||||
|
||||
// Given: A task workspace
|
||||
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 0)
|
||||
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostart")
|
||||
|
||||
// Given: The task workspace has an autostart schedule
|
||||
err := client.UpdateWorkspaceAutostart(ctx, workspace.ID, codersdk.UpdateWorkspaceAutostartRequest{
|
||||
Schedule: ptr.Ref(sched.String()),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Given: That the workspace is in a stopped state.
|
||||
workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
|
||||
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: the autobuild executor ticks after the scheduled time
|
||||
go func() {
|
||||
tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: We expect to see a start transition
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStart, stats.Transitions[workspace.ID], "should autostart the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
})
|
||||
|
||||
t.Run("Autostop", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
)
|
||||
|
||||
// Given: A task workspace with an 8 hour deadline
|
||||
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 8*time.Hour)
|
||||
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostop")
|
||||
|
||||
// Given: The workspace is currently running
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
|
||||
require.NotZero(t, workspace.LatestBuild.Deadline, "workspace should have a deadline for autostop")
|
||||
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: the autobuild executor ticks after the deadline
|
||||
go func() {
|
||||
tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: We expect to see a stop transition
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
})
|
||||
}
|
||||
|
||||
+20
-9
@@ -493,7 +493,7 @@ func New(options *Options) *API {
|
||||
// We add this middleware early, to make sure that authorization checks made
|
||||
// by other middleware get recorded.
|
||||
if buildinfo.IsDev() {
|
||||
r.Use(httpmw.RecordAuthzChecks)
|
||||
r.Use(httpmw.RecordAuthzChecks(options.DeploymentValues.EnableAuthzRecording.Value()))
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -985,6 +985,16 @@ func New(options *Options) *API {
|
||||
r.Post("/", api.postOAuth2ProviderAppToken())
|
||||
})
|
||||
|
||||
// RFC 7009 Token Revocation Endpoint
|
||||
r.Route("/revoke", func(r chi.Router) {
|
||||
r.Use(
|
||||
// RFC 7009 endpoint uses OAuth2 client authentication, not API key
|
||||
httpmw.AsAuthzSystem(httpmw.ExtractOAuth2ProviderAppWithOAuth2Errors(options.Database)),
|
||||
)
|
||||
// POST /revoke is the standard OAuth2 token revocation endpoint per RFC 7009
|
||||
r.Post("/", api.revokeOAuth2Token())
|
||||
})
|
||||
|
||||
// RFC 7591 Dynamic Client Registration - Public endpoint
|
||||
r.Post("/register", api.postOAuth2ClientRegistration())
|
||||
|
||||
@@ -1011,10 +1021,7 @@ func New(options *Options) *API {
|
||||
apiRateLimiter,
|
||||
httpmw.ReportCLITelemetry(api.Logger, options.Telemetry),
|
||||
)
|
||||
r.Route("/aitasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Get("/prompts", api.aiTasksPrompts)
|
||||
})
|
||||
|
||||
r.Route("/tasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
|
||||
@@ -1022,11 +1029,15 @@ func New(options *Options) *API {
|
||||
|
||||
r.Route("/{user}", func(r chi.Router) {
|
||||
r.Use(httpmw.ExtractOrganizationMembersParam(options.Database, api.HTTPAuth.Authorize))
|
||||
r.Get("/{id}", api.taskGet)
|
||||
r.Delete("/{id}", api.taskDelete)
|
||||
r.Post("/{id}/send", api.taskSend)
|
||||
r.Get("/{id}/logs", api.taskLogs)
|
||||
r.Post("/", api.tasksCreate)
|
||||
|
||||
r.Route("/{task}", func(r chi.Router) {
|
||||
r.Use(httpmw.ExtractTaskParam(options.Database))
|
||||
r.Get("/", api.taskGet)
|
||||
r.Delete("/", api.taskDelete)
|
||||
r.Post("/send", api.taskSend)
|
||||
r.Get("/logs", api.taskLogs)
|
||||
})
|
||||
})
|
||||
})
|
||||
r.Route("/mcp", func(r chi.Router) {
|
||||
|
||||
@@ -1604,7 +1604,7 @@ func (nopcloser) Close() error { return nil }
|
||||
// SDKError coerces err into an SDK error.
|
||||
func SDKError(t testing.TB, err error) *codersdk.Error {
|
||||
var cerr *codersdk.Error
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %w", err)
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %s", err)
|
||||
return cerr
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ type CheckConstraint string
|
||||
|
||||
// CheckConstraint enums.
|
||||
const (
|
||||
CheckAPIKeysAllowListNotEmpty CheckConstraint = "api_keys_allow_list_not_empty" // api_keys
|
||||
CheckOneTimePasscodeSet CheckConstraint = "one_time_passcode_set" // users
|
||||
CheckUsersUsernameMinLength CheckConstraint = "users_username_min_length" // users
|
||||
CheckMaxProvisionerLogsLength CheckConstraint = "max_provisioner_logs_length" // provisioner_jobs
|
||||
@@ -13,6 +14,7 @@ const (
|
||||
CheckSubsystemsNotNone CheckConstraint = "subsystems_not_none" // workspace_agents
|
||||
CheckWorkspaceBuildsAiTaskSidebarAppIDRequired CheckConstraint = "workspace_builds_ai_task_sidebar_app_id_required" // workspace_builds
|
||||
CheckWorkspaceBuildsDeadlineBelowMaxDeadline CheckConstraint = "workspace_builds_deadline_below_max_deadline" // workspace_builds
|
||||
CheckTelemetryLockEventTypeConstraint CheckConstraint = "telemetry_lock_event_type_constraint" // telemetry_locks
|
||||
CheckValidationMonotonicOrder CheckConstraint = "validation_monotonic_order" // template_version_parameters
|
||||
CheckUsageEventTypeCheck CheckConstraint = "usage_event_type_check" // usage_events
|
||||
)
|
||||
|
||||
@@ -51,6 +51,13 @@ func ListLazy[F any, T any](convert func(F) T) func(list []F) []T {
|
||||
}
|
||||
}
|
||||
|
||||
func APIAllowListTarget(entry rbac.AllowListElement) codersdk.APIAllowListTarget {
|
||||
return codersdk.APIAllowListTarget{
|
||||
Type: codersdk.RBACResource(entry.Type),
|
||||
ID: entry.ID,
|
||||
}
|
||||
}
|
||||
|
||||
type ExternalAuthMeta struct {
|
||||
Authenticated bool
|
||||
ValidateError string
|
||||
@@ -189,6 +196,16 @@ func MinimalUser(user database.User) codersdk.MinimalUser {
|
||||
return codersdk.MinimalUser{
|
||||
ID: user.ID,
|
||||
Username: user.Username,
|
||||
Name: user.Name,
|
||||
AvatarURL: user.AvatarURL,
|
||||
}
|
||||
}
|
||||
|
||||
func MinimalUserFromVisibleUser(user database.VisibleUser) codersdk.MinimalUser {
|
||||
return codersdk.MinimalUser{
|
||||
ID: user.ID,
|
||||
Username: user.Username,
|
||||
Name: user.Name,
|
||||
AvatarURL: user.AvatarURL,
|
||||
}
|
||||
}
|
||||
@@ -197,7 +214,6 @@ func ReducedUser(user database.User) codersdk.ReducedUser {
|
||||
return codersdk.ReducedUser{
|
||||
MinimalUser: MinimalUser(user),
|
||||
Email: user.Email,
|
||||
Name: user.Name,
|
||||
CreatedAt: user.CreatedAt,
|
||||
UpdatedAt: user.UpdatedAt,
|
||||
LastSeenAt: user.LastSeenAt,
|
||||
@@ -374,6 +390,9 @@ func OAuth2ProviderApp(accessURL *url.URL, dbApp database.OAuth2ProviderApp) cod
|
||||
}).String(),
|
||||
// We do not currently support DeviceAuth.
|
||||
DeviceAuth: "",
|
||||
TokenRevoke: accessURL.ResolveReference(&url.URL{
|
||||
Path: "/oauth2/revoke",
|
||||
}).String(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -695,12 +714,13 @@ func RBACRole(role rbac.Role) codersdk.Role {
|
||||
|
||||
orgPerms := role.ByOrgID[slim.OrganizationID]
|
||||
return codersdk.Role{
|
||||
Name: slim.Name,
|
||||
OrganizationID: slim.OrganizationID,
|
||||
DisplayName: slim.DisplayName,
|
||||
SitePermissions: List(role.Site, RBACPermission),
|
||||
OrganizationPermissions: List(orgPerms.Org, RBACPermission),
|
||||
UserPermissions: List(role.User, RBACPermission),
|
||||
Name: slim.Name,
|
||||
OrganizationID: slim.OrganizationID,
|
||||
DisplayName: slim.DisplayName,
|
||||
SitePermissions: List(role.Site, RBACPermission),
|
||||
UserPermissions: List(role.User, RBACPermission),
|
||||
OrganizationPermissions: List(orgPerms.Org, RBACPermission),
|
||||
OrganizationMemberPermissions: List(orgPerms.Member, RBACPermission),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -715,8 +735,8 @@ func Role(role database.CustomRole) codersdk.Role {
|
||||
OrganizationID: orgID,
|
||||
DisplayName: role.DisplayName,
|
||||
SitePermissions: List(role.SitePermissions, Permission),
|
||||
OrganizationPermissions: List(role.OrgPermissions, Permission),
|
||||
UserPermissions: List(role.UserPermissions, Permission),
|
||||
OrganizationPermissions: List(role.OrgPermissions, Permission),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -927,7 +947,7 @@ func PreviewParameterValidation(v *previewtypes.ParameterValidation) codersdk.Pr
|
||||
}
|
||||
}
|
||||
|
||||
func AIBridgeInterception(interception database.AIBridgeInterception, tokenUsages []database.AIBridgeTokenUsage, userPrompts []database.AIBridgeUserPrompt, toolUsages []database.AIBridgeToolUsage) codersdk.AIBridgeInterception {
|
||||
func AIBridgeInterception(interception database.AIBridgeInterception, initiator database.VisibleUser, tokenUsages []database.AIBridgeTokenUsage, userPrompts []database.AIBridgeUserPrompt, toolUsages []database.AIBridgeToolUsage) codersdk.AIBridgeInterception {
|
||||
sdkTokenUsages := List(tokenUsages, AIBridgeTokenUsage)
|
||||
sort.Slice(sdkTokenUsages, func(i, j int) bool {
|
||||
// created_at ASC
|
||||
@@ -943,9 +963,9 @@ func AIBridgeInterception(interception database.AIBridgeInterception, tokenUsage
|
||||
// created_at ASC
|
||||
return sdkToolUsages[i].CreatedAt.Before(sdkToolUsages[j].CreatedAt)
|
||||
})
|
||||
return codersdk.AIBridgeInterception{
|
||||
intc := codersdk.AIBridgeInterception{
|
||||
ID: interception.ID,
|
||||
InitiatorID: interception.InitiatorID,
|
||||
Initiator: MinimalUserFromVisibleUser(initiator),
|
||||
Provider: interception.Provider,
|
||||
Model: interception.Model,
|
||||
Metadata: jsonOrEmptyMap(interception.Metadata),
|
||||
@@ -954,6 +974,10 @@ func AIBridgeInterception(interception database.AIBridgeInterception, tokenUsage
|
||||
UserPrompts: sdkUserPrompts,
|
||||
ToolUsages: sdkToolUsages,
|
||||
}
|
||||
if interception.EndedAt.Valid {
|
||||
intc.EndedAt = &interception.EndedAt.Time
|
||||
}
|
||||
return intc
|
||||
}
|
||||
|
||||
func AIBridgeTokenUsage(usage database.AIBridgeTokenUsage) codersdk.AIBridgeTokenUsage {
|
||||
|
||||
@@ -85,10 +85,6 @@ func TestNestedInTx(t *testing.T) {
|
||||
func testSQLDB(t testing.TB) *sql.DB {
|
||||
t.Helper()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
connection, err := dbtestutil.Open(t)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -219,8 +219,8 @@ var (
|
||||
rbac.ResourceUser.Type: {policy.ActionRead, policy.ActionReadPersonal, policy.ActionUpdatePersonal},
|
||||
rbac.ResourceWorkspaceDormant.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStop},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionCreateAgent},
|
||||
// Provisionerd needs to read and update tasks associated with workspaces.
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
// Provisionerd needs to read, update, and delete tasks associated with workspaces.
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceApiKey.Type: {policy.WildcardSymbol},
|
||||
// When org scoped provisioner credentials are implemented,
|
||||
// this can be reduced to read a specific org.
|
||||
@@ -254,6 +254,7 @@ var (
|
||||
rbac.ResourceFile.Type: {policy.ActionRead}, // Required to read terraform files
|
||||
rbac.ResourceNotificationMessage.Type: {policy.ActionCreate, policy.ActionRead},
|
||||
rbac.ResourceSystem.Type: {policy.WildcardSymbol},
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
rbac.ResourceTemplate.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
rbac.ResourceUser.Type: {policy.ActionRead},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop},
|
||||
@@ -395,11 +396,13 @@ var (
|
||||
Identifier: rbac.RoleIdentifier{Name: "subagentapi"},
|
||||
DisplayName: "Sub Agent API",
|
||||
Site: []rbac.Permission{},
|
||||
User: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
}),
|
||||
User: []rbac.Permission{},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {},
|
||||
orgID.String(): {
|
||||
Member: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -446,6 +449,34 @@ var (
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
|
||||
subjectSystemOAuth2 = rbac.Subject{
|
||||
Type: rbac.SubjectTypeSystemOAuth,
|
||||
FriendlyName: "System OAuth2",
|
||||
ID: uuid.Nil.String(),
|
||||
Roles: rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleIdentifier{Name: "system-oauth2"},
|
||||
DisplayName: "System OAuth2",
|
||||
Site: rbac.Permissions(map[string][]policy.Action{
|
||||
// OAuth2 resources - full CRUD permissions
|
||||
rbac.ResourceOauth2App.Type: rbac.ResourceOauth2App.AvailableActions(),
|
||||
rbac.ResourceOauth2AppSecret.Type: rbac.ResourceOauth2AppSecret.AvailableActions(),
|
||||
rbac.ResourceOauth2AppCodeToken.Type: rbac.ResourceOauth2AppCodeToken.AvailableActions(),
|
||||
|
||||
// API key permissions needed for OAuth2 token revocation
|
||||
rbac.ResourceApiKey.Type: {policy.ActionRead, policy.ActionDelete},
|
||||
|
||||
// Minimal read permissions that might be needed for OAuth2 operations
|
||||
rbac.ResourceUser.Type: {policy.ActionRead},
|
||||
rbac.ResourceOrganization.Type: {policy.ActionRead},
|
||||
}),
|
||||
User: []rbac.Permission{},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{},
|
||||
},
|
||||
}),
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
|
||||
subjectSystemReadProvisionerDaemons = rbac.Subject{
|
||||
Type: rbac.SubjectTypeSystemReadProvisionerDaemons,
|
||||
FriendlyName: "Provisioner Daemons Reader",
|
||||
@@ -643,6 +674,12 @@ func AsSystemRestricted(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectSystemRestricted)
|
||||
}
|
||||
|
||||
// AsSystemOAuth2 returns a context with an actor that has permissions
|
||||
// required for OAuth2 provider operations (token revocation, device codes, registration).
|
||||
func AsSystemOAuth2(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectSystemOAuth2)
|
||||
}
|
||||
|
||||
// AsSystemReadProvisionerDaemons returns a context with an actor that has permissions
|
||||
// to read provisioner daemons.
|
||||
func AsSystemReadProvisionerDaemons(ctx context.Context) context.Context {
|
||||
@@ -1256,14 +1293,17 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
return xerrors.Errorf("invalid role: %w", err)
|
||||
}
|
||||
|
||||
if len(rbacRole.ByOrgID) > 0 && len(rbacRole.Site) > 0 {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org scoped perms, then knowing who can
|
||||
// do what gets more complicated.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign both org and site permissions at the same time")
|
||||
if len(rbacRole.ByOrgID) > 0 && (len(rbacRole.Site) > 0 || len(rbacRole.User) > 0) {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org
|
||||
// scoped perms, then knowing who can do what gets more complicated. Roles
|
||||
// should either be entirely org-scoped or entirely unrelated to
|
||||
// organizations.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign both org-scoped and site/user permissions at the same time")
|
||||
}
|
||||
|
||||
if len(rbacRole.ByOrgID) > 1 {
|
||||
// Again to avoid more complexity in our roles
|
||||
// Again to avoid more complexity in our roles. Roles are limited to one
|
||||
// organization.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign permissions to more than 1 org at a time")
|
||||
}
|
||||
|
||||
@@ -1279,7 +1319,18 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
for _, orgPerm := range perms.Org {
|
||||
err := q.customRoleEscalationCheck(ctx, act, orgPerm, rbac.Object{OrgID: orgID, Type: orgPerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("org=%q: %w", orgID, err)
|
||||
return xerrors.Errorf("org=%q: org: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
for _, memberPerm := range perms.Member {
|
||||
// The person giving the permission should still be required to have
|
||||
// the permissions throughout the org in order to give individuals the
|
||||
// same permission among their own resources, since the role can be given
|
||||
// to anyone. The `Owner` is intentionally omitted from the `Object` to
|
||||
// enforce this.
|
||||
err := q.customRoleEscalationCheck(ctx, act, memberPerm, rbac.Object{OrgID: orgID, Type: memberPerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("org=%q: member: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1297,8 +1348,8 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
func (q *querier) authorizeProvisionerJob(ctx context.Context, job database.ProvisionerJob) error {
|
||||
switch job.Type {
|
||||
case database.ProvisionerJobTypeWorkspaceBuild:
|
||||
// Authorized call to get workspace build. If we can read the build, we
|
||||
// can read the job.
|
||||
// Authorized call to get workspace build. If we can read the build, we can
|
||||
// read the job.
|
||||
_, err := q.GetWorkspaceBuildByJobID(ctx, job.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch related workspace build: %w", err)
|
||||
@@ -1341,8 +1392,8 @@ func (q *querier) ActivityBumpWorkspace(ctx context.Context, arg database.Activi
|
||||
}
|
||||
|
||||
func (q *querier) AllUserIDs(ctx context.Context, includeSystem bool) ([]uuid.UUID, error) {
|
||||
// Although this technically only reads users, only system-related functions should be
|
||||
// allowed to call this.
|
||||
// Although this technically only reads users, only system-related functions
|
||||
// should be allowed to call this.
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1361,8 +1412,8 @@ func (q *querier) ArchiveUnusedTemplateVersions(ctx context.Context, arg databas
|
||||
}
|
||||
|
||||
func (q *querier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg database.BatchUpdateWorkspaceLastUsedAtParams) error {
|
||||
// Could be any workspace and checking auth to each workspace is overkill for the purpose
|
||||
// of this function.
|
||||
// Could be any workspace and checking auth to each workspace is overkill for
|
||||
// the purpose of this function.
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1390,6 +1441,13 @@ func (q *querier) BulkMarkNotificationMessagesSent(ctx context.Context, arg data
|
||||
return q.db.BulkMarkNotificationMessagesSent(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg database.CalculateAIBridgeInterceptionsTelemetrySummaryParams) (database.CalculateAIBridgeInterceptionsTelemetrySummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAibridgeInterception); err != nil {
|
||||
return database.CalculateAIBridgeInterceptionsTelemetrySummaryRow{}, err
|
||||
}
|
||||
return q.db.CalculateAIBridgeInterceptionsTelemetrySummary(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ClaimPrebuiltWorkspace(ctx context.Context, arg database.ClaimPrebuiltWorkspaceParams) (database.ClaimPrebuiltWorkspaceRow, error) {
|
||||
empty := database.ClaimPrebuiltWorkspaceRow{}
|
||||
|
||||
@@ -1478,6 +1536,13 @@ func (q *querier) CountInProgressPrebuilds(ctx context.Context) ([]database.Coun
|
||||
return q.db.CountInProgressPrebuilds(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) CountPendingNonActivePrebuilds(ctx context.Context) ([]database.CountPendingNonActivePrebuildsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.CountPendingNonActivePrebuilds(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceInboxNotification.WithOwner(userID.String())); err != nil {
|
||||
return 0, err
|
||||
@@ -1682,6 +1747,13 @@ func (q *querier) DeleteOldProvisionerDaemons(ctx context.Context) error {
|
||||
return q.db.DeleteOldProvisionerDaemons(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldTelemetryLocks(ctx context.Context, beforeTime time.Time) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteOldTelemetryLocks(ctx, beforeTime)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
@@ -1764,6 +1836,19 @@ func (q *querier) DeleteTailnetTunnel(ctx context.Context, arg database.DeleteTa
|
||||
return q.db.DeleteTailnetTunnel(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
task, err := q.db.GetTaskByID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, task.RBACObject()); err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
return q.db.DeleteTask(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteUserSecret(ctx context.Context, id uuid.UUID) error {
|
||||
// First get the secret to check ownership
|
||||
secret, err := q.GetUserSecret(ctx, id)
|
||||
@@ -2428,7 +2513,7 @@ func (q *querier) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (d
|
||||
return q.db.GetOAuth2ProviderAppByID(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetOAuth2ProviderAppByRegistrationToken(ctx context.Context, registrationAccessToken sql.NullString) (database.OAuth2ProviderApp, error) {
|
||||
func (q *querier) GetOAuth2ProviderAppByRegistrationToken(ctx context.Context, registrationAccessToken []byte) (database.OAuth2ProviderApp, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceOauth2App); err != nil {
|
||||
return database.OAuth2ProviderApp{}, err
|
||||
}
|
||||
@@ -2564,6 +2649,13 @@ func (q *querier) GetOrganizationsByUserID(ctx context.Context, userID database.
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationsByUserID)(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceOrganization.All()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetOrganizationsWithPrebuildStatus(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
|
||||
version, err := q.db.GetTemplateVersionByJobID(ctx, jobID)
|
||||
if err != nil {
|
||||
@@ -4158,6 +4250,13 @@ func (q *querier) InsertTelemetryItemIfNotExists(ctx context.Context, arg databa
|
||||
return q.db.InsertTelemetryItemIfNotExists(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTelemetryLock(ctx context.Context, arg database.InsertTelemetryLockParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.InsertTelemetryLock(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
|
||||
obj := rbac.ResourceTemplate.InOrg(arg.OrganizationID)
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, obj); err != nil {
|
||||
@@ -4461,7 +4560,7 @@ func (q *querier) InsertWorkspaceResourceMetadata(ctx context.Context, arg datab
|
||||
return q.db.InsertWorkspaceResourceMetadata(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ListAIBridgeInterceptions(ctx context.Context, arg database.ListAIBridgeInterceptionsParams) ([]database.AIBridgeInterception, error) {
|
||||
func (q *querier) ListAIBridgeInterceptions(ctx context.Context, arg database.ListAIBridgeInterceptionsParams) ([]database.ListAIBridgeInterceptionsRow, error) {
|
||||
prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceAibridgeInterception.Type)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("(dev error) prepare sql filter: %w", err)
|
||||
@@ -4469,6 +4568,13 @@ func (q *querier) ListAIBridgeInterceptions(ctx context.Context, arg database.Li
|
||||
return q.db.ListAuthorizedAIBridgeInterceptions(ctx, arg, prep)
|
||||
}
|
||||
|
||||
func (q *querier) ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg database.ListAIBridgeInterceptionsTelemetrySummariesParams) ([]database.ListAIBridgeInterceptionsTelemetrySummariesRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAibridgeInterception); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.ListAIBridgeInterceptionsTelemetrySummaries(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ListAIBridgeTokenUsagesByInterceptionIDs(ctx context.Context, interceptionIDs []uuid.UUID) ([]database.AIBridgeTokenUsage, error) {
|
||||
// This function is a system function until we implement a join for aibridge interceptions.
|
||||
// Matches the behavior of the workspaces listing endpoint.
|
||||
@@ -4657,6 +4763,13 @@ func (q *querier) UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
|
||||
return update(q.log, q.auth, fetch, q.db.UnfavoriteWorkspace)(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateAIBridgeInterceptionEnded(ctx context.Context, params database.UpdateAIBridgeInterceptionEndedParams) (database.AIBridgeInterception, error) {
|
||||
if err := q.authorizeAIBridgeInterceptionAction(ctx, policy.ActionUpdate, params.ID); err != nil {
|
||||
return database.AIBridgeInterception{}, err
|
||||
}
|
||||
return q.db.UpdateAIBridgeInterceptionEnded(ctx, params)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKeyByIDParams) error {
|
||||
fetch := func(ctx context.Context, arg database.UpdateAPIKeyByIDParams) (database.APIKey, error) {
|
||||
return q.db.GetAPIKeyByID(ctx, arg.ID)
|
||||
@@ -4828,6 +4941,14 @@ func (q *querier) UpdateOrganizationDeletedByID(ctx context.Context, arg databas
|
||||
return deleteQ(q.log, q.auth, q.db.GetOrganizationByID, deleteF)(ctx, arg.ID)
|
||||
}
|
||||
|
||||
func (q *querier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
|
||||
// Prebuild operation for canceling pending prebuild jobs from non-active template versions
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourcePrebuiltWorkspace); err != nil {
|
||||
return []database.UpdatePrebuildProvisionerJobWithCancelRow{}, err
|
||||
}
|
||||
return q.db.UpdatePrebuildProvisionerJobWithCancel(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdatePresetPrebuildStatus(ctx context.Context, arg database.UpdatePresetPrebuildStatusParams) error {
|
||||
preset, err := q.db.GetPresetByID(ctx, arg.PresetID)
|
||||
if err != nil {
|
||||
@@ -4975,6 +5096,30 @@ func (q *querier) UpdateTailnetPeerStatusByCoordinator(ctx context.Context, arg
|
||||
return q.db.UpdateTailnetPeerStatusByCoordinator(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateTaskWorkspaceID(ctx context.Context, arg database.UpdateTaskWorkspaceIDParams) (database.TaskTable, error) {
|
||||
// An actor is allowed to update the workspace ID of a task if they are the
|
||||
// owner of the task and workspace or have the appropriate permissions.
|
||||
task, err := q.db.GetTaskByID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, task.RBACObject()); err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
ws, err := q.db.GetWorkspaceByID(ctx, arg.WorkspaceID.UUID)
|
||||
if err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, ws.RBACObject()); err != nil {
|
||||
return database.TaskTable{}, err
|
||||
}
|
||||
|
||||
return q.db.UpdateTaskWorkspaceID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateTemplateACLByID(ctx context.Context, arg database.UpdateTemplateACLByIDParams) error {
|
||||
fetch := func(ctx context.Context, arg database.UpdateTemplateACLByIDParams) (database.Template, error) {
|
||||
return q.db.GetTemplateByID(ctx, arg.ID)
|
||||
@@ -5870,7 +6015,7 @@ func (q *querier) CountAuthorizedConnectionLogs(ctx context.Context, arg databas
|
||||
return q.CountConnectionLogs(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ListAuthorizedAIBridgeInterceptions(ctx context.Context, arg database.ListAIBridgeInterceptionsParams, _ rbac.PreparedAuthorized) ([]database.AIBridgeInterception, error) {
|
||||
func (q *querier) ListAuthorizedAIBridgeInterceptions(ctx context.Context, arg database.ListAIBridgeInterceptionsParams, _ rbac.PreparedAuthorized) ([]database.ListAIBridgeInterceptionsRow, error) {
|
||||
// TODO: Delete this function, all ListAIBridgeInterceptions should be authorized. For now just call ListAIBridgeInterceptions on the authz querier.
|
||||
// This cannot be deleted for now because it's included in the
|
||||
// database.Store interface, so dbauthz needs to implement it.
|
||||
|
||||
@@ -641,6 +641,19 @@ func (s *MethodTestSuite) TestProvisionerJob() {
|
||||
dbm.EXPECT().UpdateProvisionerJobWithCancelByID(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(v.RBACObject(tpl), []policy.Action{policy.ActionRead, policy.ActionUpdate}).Returns()
|
||||
}))
|
||||
s.Run("UpdatePrebuildProvisionerJobWithCancel", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
arg := database.UpdatePrebuildProvisionerJobWithCancelParams{
|
||||
PresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
Now: dbtime.Now(),
|
||||
}
|
||||
canceledJobs := []database.UpdatePrebuildProvisionerJobWithCancelRow{
|
||||
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
|
||||
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
|
||||
}
|
||||
|
||||
dbm.EXPECT().UpdatePrebuildProvisionerJobWithCancel(gomock.Any(), arg).Return(canceledJobs, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourcePrebuiltWorkspace, policy.ActionUpdate).Returns(canceledJobs)
|
||||
}))
|
||||
s.Run("GetProvisionerJobsByIDs", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
org := testutil.Fake(s.T(), faker, database.Organization{})
|
||||
org2 := testutil.Fake(s.T(), faker, database.Organization{})
|
||||
@@ -2362,6 +2375,16 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
check.Args(task.ID).Asserts(task, policy.ActionRead).Returns(task)
|
||||
}))
|
||||
s.Run("DeleteTask", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
arg := database.DeleteTaskParams{
|
||||
ID: task.ID,
|
||||
DeletedAt: dbtime.Now(),
|
||||
}
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().DeleteTask(gomock.Any(), arg).Return(database.TaskTable{}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionDelete).Returns(database.TaskTable{})
|
||||
}))
|
||||
s.Run("InsertTask", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
tpl := testutil.Fake(s.T(), faker, database.Template{})
|
||||
tv := testutil.Fake(s.T(), faker, database.TemplateVersion{
|
||||
@@ -2395,6 +2418,20 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
|
||||
check.Args(arg).Asserts(task, policy.ActionUpdate).Returns(database.TaskWorkspaceApp{})
|
||||
}))
|
||||
s.Run("UpdateTaskWorkspaceID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
arg := database.UpdateTaskWorkspaceIDParams{
|
||||
ID: task.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: ws.ID, Valid: true},
|
||||
}
|
||||
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), ws.ID).Return(ws, nil).AnyTimes()
|
||||
dbm.EXPECT().UpdateTaskWorkspaceID(gomock.Any(), arg).Return(database.TaskTable{}, nil).AnyTimes()
|
||||
|
||||
check.Args(arg).Asserts(task, policy.ActionUpdate, ws, policy.ActionUpdate).Returns(database.TaskTable{})
|
||||
}))
|
||||
s.Run("GetTaskByWorkspaceID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
task.WorkspaceID = uuid.NullUUID{UUID: uuid.New(), Valid: true}
|
||||
@@ -2946,7 +2983,6 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
dbm.EXPECT().GetParameterSchemasByJobID(gomock.Any(), jobID).Return([]database.ParameterSchema{}, nil).AnyTimes()
|
||||
check.Args(jobID).
|
||||
Asserts(tpl, policy.ActionRead).
|
||||
ErrorsWithInMemDB(sql.ErrNoRows).
|
||||
Returns([]database.ParameterSchema{})
|
||||
}))
|
||||
s.Run("GetWorkspaceAppsByAgentIDs", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
@@ -3189,7 +3225,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
}))
|
||||
s.Run("GetAppSecurityKey", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetAppSecurityKey(gomock.Any()).Return("", sql.ErrNoRows).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead).ErrorsWithPG(sql.ErrNoRows)
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead).Errors(sql.ErrNoRows)
|
||||
}))
|
||||
s.Run("UpsertAppSecurityKey", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().UpsertAppSecurityKey(gomock.Any(), "foo").Return(nil).AnyTimes()
|
||||
@@ -3723,6 +3759,14 @@ func (s *MethodTestSuite) TestPrebuilds() {
|
||||
dbm.EXPECT().GetPrebuildMetrics(gomock.Any()).Return([]database.GetPrebuildMetricsRow{}, nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetOrganizationsWithPrebuildStatus", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
arg := database.GetOrganizationsWithPrebuildStatusParams{
|
||||
UserID: uuid.New(),
|
||||
GroupName: "test",
|
||||
}
|
||||
dbm.EXPECT().GetOrganizationsWithPrebuildStatus(gomock.Any(), arg).Return([]database.GetOrganizationsWithPrebuildStatusRow{}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceOrganization.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetPrebuildsSettings", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetPrebuildsSettings(gomock.Any()).Return("{}", nil).AnyTimes()
|
||||
check.Args().Asserts()
|
||||
@@ -3735,6 +3779,10 @@ func (s *MethodTestSuite) TestPrebuilds() {
|
||||
dbm.EXPECT().CountInProgressPrebuilds(gomock.Any()).Return([]database.CountInProgressPrebuildsRow{}, nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("CountPendingNonActivePrebuilds", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().CountPendingNonActivePrebuilds(gomock.Any()).Return([]database.CountPendingNonActivePrebuildsRow{}, nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetPresetsAtFailureLimit", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetPresetsAtFailureLimit(gomock.Any(), int64(0)).Return([]database.GetPresetsAtFailureLimitRow{}, nil).AnyTimes()
|
||||
check.Args(int64(0)).Asserts(rbac.ResourceTemplate.All(), policy.ActionViewInsights)
|
||||
@@ -3902,9 +3950,9 @@ func (s *MethodTestSuite) TestOAuth2ProviderApps() {
|
||||
}))
|
||||
s.Run("GetOAuth2ProviderAppByRegistrationToken", s.Subtest(func(db database.Store, check *expects) {
|
||||
app := dbgen.OAuth2ProviderApp(s.T(), db, database.OAuth2ProviderApp{
|
||||
RegistrationAccessToken: sql.NullString{String: "test-token", Valid: true},
|
||||
RegistrationAccessToken: []byte("test-token"),
|
||||
})
|
||||
check.Args(sql.NullString{String: "test-token", Valid: true}).Asserts(rbac.ResourceOauth2App, policy.ActionRead).Returns(app)
|
||||
check.Args([]byte("test-token")).Asserts(rbac.ResourceOauth2App, policy.ActionRead).Returns(app)
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -4537,14 +4585,14 @@ func (s *MethodTestSuite) TestAIBridge() {
|
||||
|
||||
s.Run("ListAIBridgeInterceptions", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
params := database.ListAIBridgeInterceptionsParams{}
|
||||
db.EXPECT().ListAuthorizedAIBridgeInterceptions(gomock.Any(), params, gomock.Any()).Return([]database.AIBridgeInterception{}, nil).AnyTimes()
|
||||
db.EXPECT().ListAuthorizedAIBridgeInterceptions(gomock.Any(), params, gomock.Any()).Return([]database.ListAIBridgeInterceptionsRow{}, nil).AnyTimes()
|
||||
// No asserts here because SQLFilter.
|
||||
check.Args(params).Asserts()
|
||||
}))
|
||||
|
||||
s.Run("ListAuthorizedAIBridgeInterceptions", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
params := database.ListAIBridgeInterceptionsParams{}
|
||||
db.EXPECT().ListAuthorizedAIBridgeInterceptions(gomock.Any(), params, gomock.Any()).Return([]database.AIBridgeInterception{}, nil).AnyTimes()
|
||||
db.EXPECT().ListAuthorizedAIBridgeInterceptions(gomock.Any(), params, gomock.Any()).Return([]database.ListAIBridgeInterceptionsRow{}, nil).AnyTimes()
|
||||
// No asserts here because SQLFilter.
|
||||
check.Args(params, emptyPreparedAuthorized{}).Asserts()
|
||||
}))
|
||||
@@ -4580,4 +4628,35 @@ func (s *MethodTestSuite) TestAIBridge() {
|
||||
db.EXPECT().ListAIBridgeToolUsagesByInterceptionIDs(gomock.Any(), ids).Return([]database.AIBridgeToolUsage{}, nil).AnyTimes()
|
||||
check.Args(ids).Asserts(rbac.ResourceSystem, policy.ActionRead).Returns([]database.AIBridgeToolUsage{})
|
||||
}))
|
||||
|
||||
s.Run("UpdateAIBridgeInterceptionEnded", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
intcID := uuid.UUID{1}
|
||||
params := database.UpdateAIBridgeInterceptionEndedParams{ID: intcID}
|
||||
intc := testutil.Fake(s.T(), faker, database.AIBridgeInterception{ID: intcID})
|
||||
db.EXPECT().GetAIBridgeInterceptionByID(gomock.Any(), intcID).Return(intc, nil).AnyTimes() // Validation.
|
||||
db.EXPECT().UpdateAIBridgeInterceptionEnded(gomock.Any(), params).Return(intc, nil).AnyTimes()
|
||||
check.Args(params).Asserts(intc, policy.ActionUpdate).Returns(intc)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestTelemetry() {
|
||||
s.Run("InsertTelemetryLock", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().InsertTelemetryLock(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args(database.InsertTelemetryLockParams{}).Asserts(rbac.ResourceSystem, policy.ActionCreate)
|
||||
}))
|
||||
|
||||
s.Run("DeleteOldTelemetryLocks", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().DeleteOldTelemetryLocks(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args(time.Time{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("ListAIBridgeInterceptionsTelemetrySummaries", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().ListAIBridgeInterceptionsTelemetrySummaries(gomock.Any(), gomock.Any()).Return([]database.ListAIBridgeInterceptionsTelemetrySummariesRow{}, nil).AnyTimes()
|
||||
check.Args(database.ListAIBridgeInterceptionsTelemetrySummariesParams{}).Asserts(rbac.ResourceAibridgeInterception, policy.ActionRead)
|
||||
}))
|
||||
|
||||
s.Run("CalculateAIBridgeInterceptionsTelemetrySummary", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().CalculateAIBridgeInterceptionsTelemetrySummary(gomock.Any(), gomock.Any()).Return(database.CalculateAIBridgeInterceptionsTelemetrySummaryRow{}, nil).AnyTimes()
|
||||
check.Args(database.CalculateAIBridgeInterceptionsTelemetrySummaryParams{}).Asserts(rbac.ResourceAibridgeInterception, policy.ActionRead)
|
||||
}))
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user