Compare commits
120 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 368a6fcc24 | |||
| 89d3874904 | |||
| 26d029022d | |||
| 2a5d86e2aa | |||
| eef18424e3 | |||
| 926369b9f2 | |||
| e17b445e55 | |||
| dc5b877f26 | |||
| cb5ddec5c5 | |||
| eb020611a3 | |||
| 697b3a0a06 | |||
| acd6fe7aeb | |||
| 0b214ad7f6 | |||
| 17438d9730 | |||
| 279288affe | |||
| c571995a42 | |||
| bea2f8633a | |||
| dc9166b4cd | |||
| db22227f08 | |||
| 0eb8e904a1 | |||
| 2734123ac2 | |||
| 37432aefa6 | |||
| cf746f3a87 | |||
| ea533aa522 | |||
| 979df63788 | |||
| c4e9749146 | |||
| fb785d3524 | |||
| a899fc57a6 | |||
| 77e2521fa0 | |||
| c627a68e96 | |||
| 7ae3fdc749 | |||
| 59921fa5fa | |||
| 976bcf22d5 | |||
| ef5002d534 | |||
| 251a3e9de1 | |||
| 7b6e72438b | |||
| 8f78baddb1 | |||
| 0f8f67ec6f | |||
| 9298e7e073 | |||
| 7182c53df7 | |||
| 37222199c3 | |||
| 9c47733e16 | |||
| 139dab7cfe | |||
| d306a2d7e5 | |||
| 30d2fc8bfc | |||
| d80b5fc8ed | |||
| de5fd7b110 | |||
| 197b422a31 | |||
| 38017010ce | |||
| 984a834e81 | |||
| 2bcf08457b | |||
| 73dedcc765 | |||
| 94f6e83cfa | |||
| bc0c4ebaa7 | |||
| dc277618ee | |||
| b90c74a94d | |||
| ff532d9bf3 | |||
| 54497f4f6b | |||
| 9629d873fb | |||
| 643fe38b1e | |||
| c827a08c11 | |||
| 1b6556c2f6 | |||
| 859e94d67a | |||
| 50749d131b | |||
| 9986dc0c38 | |||
| 92b63871ca | |||
| 303e9ef7de | |||
| 1ebc217624 | |||
| 06dbadab11 | |||
| 566146af72 | |||
| 7e8fcb4b0f | |||
| dd28eef5b4 | |||
| 2f886ce8d0 | |||
| dcfd6d6f73 | |||
| b20fd6f2c1 | |||
| 2294c55bd9 | |||
| aad1b401c1 | |||
| a8294872a3 | |||
| 95a1ca898f | |||
| c3e3bb58f2 | |||
| 0d765f56f7 | |||
| 8b6f55c312 | |||
| 40fc337659 | |||
| f6df4c0ed8 | |||
| 924afb753f | |||
| 45c43d4ec4 | |||
| a1e7e105a4 | |||
| cf93c34172 | |||
| 659f89e079 | |||
| e4e4669feb | |||
| a1fa58ac17 | |||
| 88b7372e7f | |||
| dec6d310a8 | |||
| e720afa9d0 | |||
| d18441debe | |||
| 4f7b279fd8 | |||
| c3cbd977f1 | |||
| d8b1ca70d6 | |||
| 5a3ceb38f0 | |||
| cadf1352b4 | |||
| ed3d6fa9e3 | |||
| d9c40d61c2 | |||
| 90b64c5e04 | |||
| d0fb4599f0 | |||
| ffe22a0ffc | |||
| a1161b79a7 | |||
| dd92fbc83c | |||
| 10d2844cce | |||
| 277b2d21ca | |||
| af3ff825a1 | |||
| 50ba223aa1 | |||
| 6318520501 | |||
| f3f83540df | |||
| 10ef5933f4 | |||
| c8538769a2 | |||
| 5743149396 | |||
| 9780d0295c | |||
| b89093031e | |||
| 87045fc27b | |||
| b8a0f97cab |
@@ -5,6 +5,13 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
with:
|
||||
sqlc-version: "1.27.0"
|
||||
# uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
# with:
|
||||
# sqlc-version: "1.30.0"
|
||||
|
||||
# Switched to coder/sqlc fork to fix ambiguous column bug, see:
|
||||
# - https://github.com/coder/sqlc/pull/1
|
||||
# - https://github.com/sqlc-dev/sqlc/pull/4159
|
||||
shell: bash
|
||||
run: |
|
||||
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.13.0
|
||||
terraform_version: 1.13.4
|
||||
terraform_wrapper: false
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
app = "sao-paulo-coder"
|
||||
primary_region = "gru"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
# Ref: https://fly.io/docs/reference/configuration/#http_service-concurrency
|
||||
[http_service.concurrency]
|
||||
type = "requests"
|
||||
soft_limit = 50
|
||||
hard_limit = 100
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
+13
-25
@@ -230,7 +230,7 @@ jobs:
|
||||
shell: bash
|
||||
|
||||
gen:
|
||||
timeout-minutes: 8
|
||||
timeout-minutes: 20
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
@@ -271,6 +271,7 @@ jobs:
|
||||
popd
|
||||
|
||||
- name: make gen
|
||||
timeout-minutes: 8
|
||||
run: |
|
||||
# Remove golden files to detect discrepancy in generated files.
|
||||
make clean/golden-files
|
||||
@@ -288,7 +289,7 @@ jobs:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 7
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
@@ -315,6 +316,7 @@ jobs:
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
|
||||
- name: make fmt
|
||||
timeout-minutes: 7
|
||||
run: |
|
||||
PATH="${PATH}:$(go env GOPATH)/bin" \
|
||||
make --output-sync -j -B fmt
|
||||
@@ -376,13 +378,6 @@ jobs:
|
||||
id: go-paths
|
||||
uses: ./.github/actions/setup-go-paths
|
||||
|
||||
- name: Download Go Build Cache
|
||||
id: download-go-build-cache
|
||||
uses: ./.github/actions/test-cache/download
|
||||
with:
|
||||
key-prefix: test-go-build-${{ runner.os }}-${{ runner.arch }}
|
||||
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
@@ -390,8 +385,7 @@ jobs:
|
||||
# download the toolchain configured in go.mod, so we don't
|
||||
# need to reinstall it. It's faster on Windows runners.
|
||||
use-preinstalled-go: ${{ runner.os == 'Windows' }}
|
||||
# Cache is already downloaded above
|
||||
use-cache: false
|
||||
use-cache: true
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
@@ -500,17 +494,11 @@ jobs:
|
||||
make test
|
||||
|
||||
- name: Upload failed test db dumps
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: failed-test-db-dump-${{matrix.os}}
|
||||
path: "**/*.test.sql"
|
||||
|
||||
- name: Upload Go Build Cache
|
||||
uses: ./.github/actions/test-cache/upload
|
||||
with:
|
||||
cache-key: ${{ steps.download-go-build-cache.outputs.cache-key }}
|
||||
cache-path: ${{ steps.go-paths.outputs.cached-dirs }}
|
||||
|
||||
- name: Upload Test Cache
|
||||
uses: ./.github/actions/test-cache/upload
|
||||
with:
|
||||
@@ -762,7 +750,7 @@ jobs:
|
||||
|
||||
- name: Upload Playwright Failed Tests
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: failed-test-videos${{ matrix.variant.premium && '-premium' || '' }}
|
||||
path: ./site/test-results/**/*.webm
|
||||
@@ -770,7 +758,7 @@ jobs:
|
||||
|
||||
- name: Upload pprof dumps
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: debug-pprof-dumps${{ matrix.variant.premium && '-premium' || '' }}
|
||||
path: ./site/test-results/**/debug-pprof-*.txt
|
||||
@@ -806,7 +794,7 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@4ffe736a2a8262ea28067ff05a13b635ba31ec05 # v13.3.0
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -838,7 +826,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@4ffe736a2a8262ea28067ff05a13b635ba31ec05 # v13.3.0
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -1036,7 +1024,7 @@ jobs:
|
||||
|
||||
- name: Upload build artifacts
|
||||
if: ${{ github.repository_owner == 'coder' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')) }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: |
|
||||
@@ -1201,7 +1189,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Download dylibs
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: ./build
|
||||
@@ -1468,7 +1456,7 @@ jobs:
|
||||
|
||||
- name: Upload build artifacts
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
|
||||
@@ -163,12 +163,10 @@ jobs:
|
||||
run: |
|
||||
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
|
||||
flyctl deploy --image "$IMAGE" --app jnb-coder --config ./.github/fly-wsproxies/jnb-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_JNB" --yes
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
IMAGE: ${{ inputs.image }}
|
||||
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_JNB: ${{ secrets.FLY_JNB_CODER_PROXY_SESSION_TOKEN }}
|
||||
|
||||
@@ -30,7 +30,7 @@ jobs:
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: tj-actions/changed-files@d03a93c0dbfac6d6dd6a0d8a5e7daff992b07449 # v45.0.7
|
||||
- uses: tj-actions/changed-files@dbf178ceecb9304128c8e0648591d71208c6e2c9 # v45.0.7
|
||||
id: changed-files
|
||||
with:
|
||||
files: |
|
||||
|
||||
@@ -36,11 +36,11 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Nix
|
||||
uses: nixbuild/nix-quick-install-action@1f095fee853b33114486cfdeae62fa099cda35a9 # v33
|
||||
uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
with:
|
||||
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.4"
|
||||
nix_version: "2.28.5"
|
||||
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
|
||||
@@ -131,7 +131,7 @@ jobs:
|
||||
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: |
|
||||
@@ -327,7 +327,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Download dylibs
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: ./build
|
||||
@@ -761,7 +761,7 @@ jobs:
|
||||
|
||||
- name: Upload artifacts to actions (if dry-run)
|
||||
if: ${{ inputs.dry_run }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: release-artifacts
|
||||
path: |
|
||||
@@ -777,7 +777,7 @@ jobs:
|
||||
|
||||
- name: Upload latest sbom artifact to actions (if dry-run)
|
||||
if: inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true'
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: latest-sbom-artifact
|
||||
path: ./coder_latest_sbom.spdx.json
|
||||
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
|
||||
# Upload the results as artifacts.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
@@ -47,6 +47,6 @@ jobs:
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@16140ae1a102900babc80a33c44059580f687047 # v3.29.5
|
||||
uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@16140ae1a102900babc80a33c44059580f687047 # v3.29.5
|
||||
uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -154,13 +154,13 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
- name: Upload Trivy scan results as an artifact
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: trivy
|
||||
path: trivy-results.sarif
|
||||
|
||||
@@ -125,7 +125,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Delete PR Cleanup workflow runs
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
|
||||
@@ -89,3 +89,5 @@ result
|
||||
__debug_bin*
|
||||
|
||||
**/.claude/settings.local.json
|
||||
|
||||
/.env
|
||||
|
||||
-12
@@ -18,18 +18,6 @@ coderd/rbac/ @Emyrk
|
||||
scripts/apitypings/ @Emyrk
|
||||
scripts/gensite/ @aslilac
|
||||
|
||||
site/ @aslilac @Parkreiner
|
||||
site/src/hooks/ @Parkreiner
|
||||
# These rules intentionally do not specify any owners. More specific rules
|
||||
# override less specific rules, so these files are "ignored" by the site/ rule.
|
||||
site/e2e/google/protobuf/timestampGenerated.ts
|
||||
site/e2e/provisionerGenerated.ts
|
||||
site/src/api/countriesGenerated.ts
|
||||
site/src/api/rbacresourcesGenerated.ts
|
||||
site/src/api/typesGenerated.ts
|
||||
site/src/testHelpers/entities.ts
|
||||
site/CLAUDE.md
|
||||
|
||||
# The blood and guts of the autostop algorithm, which is quite complex and
|
||||
# requires elite ball knowledge of most of the scheduling code to make changes
|
||||
# without inadvertently affecting other parts of the codebase.
|
||||
|
||||
@@ -636,8 +636,8 @@ TAILNETTEST_MOCKS := \
|
||||
tailnet/tailnettest/subscriptionmock.go
|
||||
|
||||
AIBRIDGED_MOCKS := \
|
||||
enterprise/x/aibridged/aibridgedmock/clientmock.go \
|
||||
enterprise/x/aibridged/aibridgedmock/poolmock.go
|
||||
enterprise/aibridged/aibridgedmock/clientmock.go \
|
||||
enterprise/aibridged/aibridgedmock/poolmock.go
|
||||
|
||||
GEN_FILES := \
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
@@ -645,7 +645,7 @@ GEN_FILES := \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
$(DB_GEN_FILES) \
|
||||
$(SITE_GEN_FILES) \
|
||||
coderd/rbac/object_gen.go \
|
||||
@@ -697,7 +697,7 @@ gen/mark-fresh:
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
$(DB_GEN_FILES) \
|
||||
site/src/api/typesGenerated.ts \
|
||||
@@ -768,8 +768,8 @@ codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agen
|
||||
go generate ./codersdk/workspacesdk/agentconnmock/
|
||||
touch "$@"
|
||||
|
||||
$(AIBRIDGED_MOCKS): enterprise/x/aibridged/client.go enterprise/x/aibridged/pool.go
|
||||
go generate ./enterprise/x/aibridged/aibridgedmock/
|
||||
$(AIBRIDGED_MOCKS): enterprise/aibridged/client.go enterprise/aibridged/pool.go
|
||||
go generate ./enterprise/aibridged/aibridgedmock/
|
||||
touch "$@"
|
||||
|
||||
agent/agentcontainers/dcspec/dcspec_gen.go: \
|
||||
@@ -822,13 +822,13 @@ vpn/vpn.pb.go: vpn/vpn.proto
|
||||
--go_opt=paths=source_relative \
|
||||
./vpn/vpn.proto
|
||||
|
||||
enterprise/x/aibridged/proto/aibridged.pb.go: enterprise/x/aibridged/proto/aibridged.proto
|
||||
enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./enterprise/x/aibridged/proto/aibridged.proto
|
||||
./enterprise/aibridged/proto/aibridged.proto
|
||||
|
||||
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
# -C sets the directory for the go run command
|
||||
@@ -1182,3 +1182,8 @@ endif
|
||||
|
||||
dogfood/coder/nix.hash: flake.nix flake.lock
|
||||
sha256sum flake.nix flake.lock >./dogfood/coder/nix.hash
|
||||
|
||||
# Count the number of test databases created per test package.
|
||||
count-test-databases:
|
||||
PGPASSWORD=postgres psql -h localhost -U postgres -d coder_testing -P pager=off -c 'SELECT test_package, count(*) as count from test_databases GROUP BY test_package ORDER BY count DESC'
|
||||
.PHONY: count-test-databases
|
||||
|
||||
@@ -682,8 +682,6 @@ func (api *API) updaterLoop() {
|
||||
} else {
|
||||
prevErr = nil
|
||||
}
|
||||
default:
|
||||
api.logger.Debug(api.ctx, "updater loop ticker skipped, update in progress")
|
||||
}
|
||||
|
||||
return nil // Always nil to keep the ticker going.
|
||||
|
||||
+3
-1
@@ -250,7 +250,9 @@ func (a *agent) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
transforms[i] = replace.String(edit.Search, edit.Replace)
|
||||
}
|
||||
|
||||
tmpfile, err := afero.TempFile(a.filesystem, "", filepath.Base(path))
|
||||
// Create an adjacent file to ensure it will be on the same device and can be
|
||||
// moved atomically.
|
||||
tmpfile, err := afero.TempFile(a.filesystem, filepath.Dir(path), filepath.Base(path))
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
var (
|
||||
_ pflag.SliceValue = &AllowListFlag{}
|
||||
_ pflag.Value = &AllowListFlag{}
|
||||
)
|
||||
|
||||
// AllowListFlag implements pflag.SliceValue for codersdk.APIAllowListTarget entries.
|
||||
type AllowListFlag []codersdk.APIAllowListTarget
|
||||
|
||||
func AllowListFlagOf(al *[]codersdk.APIAllowListTarget) *AllowListFlag {
|
||||
return (*AllowListFlag)(al)
|
||||
}
|
||||
|
||||
func (a AllowListFlag) String() string {
|
||||
return strings.Join(a.GetSlice(), ",")
|
||||
}
|
||||
|
||||
func (a AllowListFlag) Value() []codersdk.APIAllowListTarget {
|
||||
return []codersdk.APIAllowListTarget(a)
|
||||
}
|
||||
|
||||
func (AllowListFlag) Type() string { return "allow-list" }
|
||||
|
||||
func (a *AllowListFlag) Set(set string) error {
|
||||
values, err := csv.NewReader(strings.NewReader(set)).Read()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse allow list entries as csv: %w", err)
|
||||
}
|
||||
for _, v := range values {
|
||||
if err := a.Append(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) Append(value string) error {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return xerrors.New("allow list entry cannot be empty")
|
||||
}
|
||||
var target codersdk.APIAllowListTarget
|
||||
if err := target.UnmarshalText([]byte(value)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*a = append(*a, target)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) Replace(items []string) error {
|
||||
*a = []codersdk.APIAllowListTarget{}
|
||||
for _, item := range items {
|
||||
if err := a.Append(item); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AllowListFlag) GetSlice() []string {
|
||||
out := make([]string, len(*a))
|
||||
for i, entry := range *a {
|
||||
out[i] = entry.String()
|
||||
}
|
||||
return out
|
||||
}
|
||||
+86
-59
@@ -384,6 +384,88 @@ func (s *scaletestPrometheusFlags) attach(opts *serpent.OptionSet) {
|
||||
)
|
||||
}
|
||||
|
||||
// workspaceTargetFlags holds common flags for targeting specific workspaces in scale tests.
|
||||
type workspaceTargetFlags struct {
|
||||
template string
|
||||
targetWorkspaces string
|
||||
useHostLogin bool
|
||||
}
|
||||
|
||||
// attach adds the workspace target flags to the given options set.
|
||||
func (f *workspaceTargetFlags) attach(opts *serpent.OptionSet) {
|
||||
*opts = append(*opts,
|
||||
serpent.Option{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&f.template),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&f.targetWorkspaces),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&f.useHostLogin),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// getTargetedWorkspaces retrieves the workspaces based on the template filter and target range. warnWriter is where to
|
||||
// write a warning message if any workspaces were skipped due to ownership mismatch.
|
||||
func (f *workspaceTargetFlags) getTargetedWorkspaces(ctx context.Context, client *codersdk.Client, organizationIDs []uuid.UUID, warnWriter io.Writer) ([]codersdk.Workspace, error) {
|
||||
// Validate template if provided
|
||||
if f.template != "" {
|
||||
_, err := parseTemplate(ctx, client, organizationIDs, f.template)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse target range
|
||||
targetStart, targetEnd, err := parseTargetRange("workspaces", f.targetWorkspaces)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse target workspaces: %w", err)
|
||||
}
|
||||
|
||||
// Determine owner based on useHostLogin
|
||||
var owner string
|
||||
if f.useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
// Get workspaces
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(ctx, client, owner, f.template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(warnWriter, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
// Adjust targetEnd if not specified
|
||||
if targetEnd == 0 {
|
||||
targetEnd = len(workspaces)
|
||||
}
|
||||
|
||||
// Validate range
|
||||
if len(workspaces) == 0 {
|
||||
return nil, xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetEnd > len(workspaces) {
|
||||
return nil, xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetEnd, len(workspaces))
|
||||
}
|
||||
|
||||
// Return the sliced workspaces
|
||||
return workspaces[targetStart:targetEnd], nil
|
||||
}
|
||||
|
||||
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
|
||||
me, err := client.User(ctx, codersdk.Me)
|
||||
if err != nil {
|
||||
@@ -1193,12 +1275,10 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
disableDirect bool
|
||||
useHostLogin bool
|
||||
app string
|
||||
template string
|
||||
targetWorkspaces string
|
||||
workspaceProxyURL string
|
||||
|
||||
targetFlags = &workspaceTargetFlags{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = newScaletestCleanupStrategy()
|
||||
@@ -1243,15 +1323,9 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
if template != "" {
|
||||
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
targetWorkspaceStart, targetWorkspaceEnd, err := parseTargetRange("workspaces", targetWorkspaces)
|
||||
workspaces, err := targetFlags.getTargetedWorkspaces(ctx, client, me.OrganizationIDs, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse target workspaces: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
appHost, err := client.AppHost(ctx)
|
||||
@@ -1259,30 +1333,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
return xerrors.Errorf("get app host: %w", err)
|
||||
}
|
||||
|
||||
var owner string
|
||||
if useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(inv.Context(), client, owner, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(inv.Stdout, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
if targetWorkspaceEnd == 0 {
|
||||
targetWorkspaceEnd = len(workspaces)
|
||||
}
|
||||
|
||||
if len(workspaces) == 0 {
|
||||
return xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetWorkspaceEnd > len(workspaces) {
|
||||
return xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetWorkspaceEnd, len(workspaces))
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
@@ -1307,10 +1357,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
|
||||
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
|
||||
for idx, ws := range workspaces {
|
||||
if idx < targetWorkspaceStart || idx >= targetWorkspaceEnd {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
agent codersdk.WorkspaceAgent
|
||||
name = "workspace-traffic"
|
||||
@@ -1415,19 +1461,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
}
|
||||
|
||||
cmd.Options = []serpent.Option{
|
||||
{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&template),
|
||||
},
|
||||
{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&targetWorkspaces),
|
||||
},
|
||||
{
|
||||
Flag: "bytes-per-tick",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
|
||||
@@ -1463,13 +1496,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
Description: "Send WebSocket traffic to a workspace app (proxied via coderd), cannot be used with --ssh.",
|
||||
Value: serpent.StringOf(&app),
|
||||
},
|
||||
{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&useHostLogin),
|
||||
},
|
||||
{
|
||||
Flag: "workspace-proxy-url",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_PROXY_URL",
|
||||
@@ -1479,6 +1505,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
targetFlags.attach(&cmd.Options)
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
strategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -29,12 +30,13 @@ import (
|
||||
|
||||
func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
var (
|
||||
userCount int64
|
||||
ownerUserPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
smtpAPIURL string
|
||||
userCount int64
|
||||
templateAdminPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
smtpRequestTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
smtpAPIURL string
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
|
||||
@@ -77,24 +79,24 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
return xerrors.Errorf("--user-count must be greater than 0")
|
||||
}
|
||||
|
||||
if ownerUserPercentage < 0 || ownerUserPercentage > 100 {
|
||||
return xerrors.Errorf("--owner-user-percentage must be between 0 and 100")
|
||||
if templateAdminPercentage < 0 || templateAdminPercentage > 100 {
|
||||
return xerrors.Errorf("--template-admin-percentage must be between 0 and 100")
|
||||
}
|
||||
|
||||
if smtpAPIURL != "" && !strings.HasPrefix(smtpAPIURL, "http://") && !strings.HasPrefix(smtpAPIURL, "https://") {
|
||||
return xerrors.Errorf("--smtp-api-url must start with http:// or https://")
|
||||
}
|
||||
|
||||
ownerUserCount := int64(float64(userCount) * ownerUserPercentage / 100)
|
||||
if ownerUserCount == 0 && ownerUserPercentage > 0 {
|
||||
ownerUserCount = 1
|
||||
templateAdminCount := int64(float64(userCount) * templateAdminPercentage / 100)
|
||||
if templateAdminCount == 0 && templateAdminPercentage > 0 {
|
||||
templateAdminCount = 1
|
||||
}
|
||||
regularUserCount := userCount - ownerUserCount
|
||||
regularUserCount := userCount - templateAdminCount
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Distribution plan:\n")
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Total users: %d\n", userCount)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Owner users: %d (%.1f%%)\n", ownerUserCount, ownerUserPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-ownerUserPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Template admins: %d (%.1f%%)\n", templateAdminCount, templateAdminPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-templateAdminPercentage)
|
||||
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
@@ -127,13 +129,12 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Creating users...")
|
||||
|
||||
dialBarrier := &sync.WaitGroup{}
|
||||
ownerWatchBarrier := &sync.WaitGroup{}
|
||||
templateAdminWatchBarrier := &sync.WaitGroup{}
|
||||
dialBarrier.Add(int(userCount))
|
||||
ownerWatchBarrier.Add(int(ownerUserCount))
|
||||
templateAdminWatchBarrier.Add(int(templateAdminCount))
|
||||
|
||||
expectedNotificationIDs := map[uuid.UUID]struct{}{
|
||||
notificationsLib.TemplateUserAccountCreated: {},
|
||||
notificationsLib.TemplateUserAccountDeleted: {},
|
||||
notificationsLib.TemplateTemplateDeleted: {},
|
||||
}
|
||||
|
||||
triggerTimes := make(map[uuid.UUID]chan time.Time, len(expectedNotificationIDs))
|
||||
@@ -142,19 +143,20 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
}
|
||||
|
||||
configs := make([]notifications.Config, 0, userCount)
|
||||
for range ownerUserCount {
|
||||
for range templateAdminCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{codersdk.RoleOwner},
|
||||
Roles: []string{codersdk.RoleTemplateAdmin},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
ExpectedNotificationsIDs: expectedNotificationIDs,
|
||||
Metrics: metrics,
|
||||
SMTPApiURL: smtpAPIURL,
|
||||
SMTPRequestTimeout: smtpRequestTimeout,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
@@ -170,9 +172,8 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
Metrics: metrics,
|
||||
SMTPApiURL: smtpAPIURL,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
@@ -180,7 +181,7 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
configs = append(configs, config)
|
||||
}
|
||||
|
||||
go triggerUserNotifications(
|
||||
go triggerNotifications(
|
||||
ctx,
|
||||
logger,
|
||||
client,
|
||||
@@ -261,23 +262,30 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Flag: "owner-user-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_OWNER_USER_PERCENTAGE",
|
||||
Flag: "template-admin-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TEMPLATE_ADMIN_PERCENTAGE",
|
||||
Default: "20.0",
|
||||
Description: "Percentage of users to assign Owner role to (0-100).",
|
||||
Value: serpent.Float64Of(&ownerUserPercentage),
|
||||
Description: "Percentage of users to assign Template Admin role to (0-100).",
|
||||
Value: serpent.Float64Of(&templateAdminPercentage),
|
||||
},
|
||||
{
|
||||
Flag: "notification-timeout",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TIMEOUT",
|
||||
Default: "5m",
|
||||
Default: "10m",
|
||||
Description: "How long to wait for notifications after triggering.",
|
||||
Value: serpent.DurationOf(¬ificationTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "smtp-request-timeout",
|
||||
Env: "CODER_SCALETEST_SMTP_REQUEST_TIMEOUT",
|
||||
Default: "5m",
|
||||
Description: "Timeout for SMTP requests.",
|
||||
Value: serpent.DurationOf(&smtpRequestTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "dial-timeout",
|
||||
Env: "CODER_SCALETEST_DIAL_TIMEOUT",
|
||||
Default: "2m",
|
||||
Default: "10m",
|
||||
Description: "Timeout for dialing the notification websocket endpoint.",
|
||||
Value: serpent.DurationOf(&dialTimeout),
|
||||
},
|
||||
@@ -379,9 +387,9 @@ func computeNotificationLatencies(
|
||||
return nil
|
||||
}
|
||||
|
||||
// triggerUserNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test user to trigger notification events for testing.
|
||||
func triggerUserNotifications(
|
||||
// triggerNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test template to trigger notification events for testing.
|
||||
func triggerNotifications(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
client *codersdk.Client,
|
||||
@@ -414,34 +422,49 @@ func triggerUserNotifications(
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
triggerUsername = "scaletest-trigger-user"
|
||||
triggerEmail = "scaletest-trigger@example.com"
|
||||
)
|
||||
logger.Info(ctx, "creating test template to test notifications")
|
||||
|
||||
logger.Info(ctx, "creating test user to test notifications",
|
||||
slog.F("username", triggerUsername),
|
||||
slog.F("email", triggerEmail),
|
||||
slog.F("org_id", orgID))
|
||||
// Upload empty template file.
|
||||
file, err := client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader([]byte{}))
|
||||
if err != nil {
|
||||
logger.Error(ctx, "upload test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template uploaded", slog.F("file_id", file.ID))
|
||||
|
||||
testUser, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
OrganizationIDs: []uuid.UUID{orgID},
|
||||
Username: triggerUsername,
|
||||
Email: triggerEmail,
|
||||
Password: "test-password-123",
|
||||
// Create template version.
|
||||
version, err := client.CreateTemplateVersion(ctx, orgID, codersdk.CreateTemplateVersionRequest{
|
||||
StorageMethod: codersdk.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Provisioner: codersdk.ProvisionerTypeEcho,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test user", slog.Error(err))
|
||||
logger.Error(ctx, "create test template version", slog.Error(err))
|
||||
return
|
||||
}
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountCreated] <- time.Now()
|
||||
logger.Info(ctx, "test template version created", slog.F("template_version_id", version.ID))
|
||||
|
||||
err = client.DeleteUser(ctx, testUser.ID)
|
||||
// Create template.
|
||||
testTemplate, err := client.CreateTemplate(ctx, orgID, codersdk.CreateTemplateRequest{
|
||||
Name: "scaletest-test-template",
|
||||
Description: "scaletest-test-template",
|
||||
VersionID: version.ID,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "delete test user", slog.Error(err))
|
||||
logger.Error(ctx, "create test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountCreated])
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountDeleted])
|
||||
logger.Info(ctx, "test template created", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Delete template to trigger notification.
|
||||
err = client.DeleteTemplate(ctx, testTemplate.ID)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "delete test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template deleted", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Record expected notification.
|
||||
expectedNotifications[notificationsLib.TemplateTemplateDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateTemplateDeleted])
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"io"
|
||||
@@ -19,10 +18,7 @@ import (
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
@@ -43,76 +39,22 @@ func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UU
|
||||
},
|
||||
}).Do()
|
||||
|
||||
ws := database.WorkspaceTable{
|
||||
build := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: tv.Template.ID,
|
||||
}
|
||||
build := dbfake.WorkspaceBuild(t, db, ws).
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{
|
||||
TemplateVersionID: tv.TemplateVersion.ID,
|
||||
Transition: transition,
|
||||
}).WithAgent().Do()
|
||||
dbgen.WorkspaceBuildParameters(t, db, []database.WorkspaceBuildParameter{
|
||||
{
|
||||
WorkspaceBuildID: build.Build.ID,
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: prompt,
|
||||
},
|
||||
})
|
||||
agents, err := db.GetWorkspaceAgentsByWorkspaceAndBuildNumber(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.GetWorkspaceAgentsByWorkspaceAndBuildNumberParams{
|
||||
WorkspaceID: build.Workspace.ID,
|
||||
BuildNumber: build.Build.BuildNumber,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, agents)
|
||||
agentID := agents[0].ID
|
||||
}).
|
||||
WithAgent().
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: prompt,
|
||||
}, nil).
|
||||
Do()
|
||||
|
||||
// Create a workspace app and set it as the sidebar app.
|
||||
app := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
|
||||
AgentID: agentID,
|
||||
Slug: "task-sidebar",
|
||||
DisplayName: "Task Sidebar",
|
||||
External: false,
|
||||
})
|
||||
|
||||
// Update build flags to reference the sidebar app and HasAITask=true.
|
||||
err = db.UpdateWorkspaceBuildFlagsByID(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.UpdateWorkspaceBuildFlagsByIDParams{
|
||||
ID: build.Build.ID,
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
HasExternalAgent: sql.NullBool{Bool: false, Valid: false},
|
||||
SidebarAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
UpdatedAt: build.Build.UpdatedAt,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a task record in the tasks table for the new data model.
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: ownerID,
|
||||
Name: build.Workspace.Name,
|
||||
WorkspaceID: uuid.NullUUID{UUID: build.Workspace.ID, Valid: true},
|
||||
TemplateVersionID: tv.TemplateVersion.ID,
|
||||
TemplateParameters: []byte("{}"),
|
||||
Prompt: prompt,
|
||||
CreatedAt: dbtime.Now(),
|
||||
})
|
||||
|
||||
// Link the task to the workspace app.
|
||||
dbgen.TaskWorkspaceApp(t, db, database.TaskWorkspaceApp{
|
||||
TaskID: task.ID,
|
||||
WorkspaceBuildNumber: build.Build.BuildNumber,
|
||||
WorkspaceAgentID: uuid.NullUUID{UUID: agentID, Valid: true},
|
||||
WorkspaceAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
})
|
||||
|
||||
return task
|
||||
return build.Task
|
||||
}
|
||||
|
||||
func TestExpTaskList(t *testing.T) {
|
||||
|
||||
@@ -293,7 +293,6 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
},
|
||||
},
|
||||
@@ -328,9 +327,7 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
},
|
||||
AiTasks: []*proto.AITask{
|
||||
{
|
||||
SidebarApp: &proto.AITaskSidebarApp{
|
||||
Id: taskAppID.String(),
|
||||
},
|
||||
AppId: taskAppID.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -0,0 +1,355 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
)
|
||||
|
||||
// mockKeyring is a mock sessionstore.Backend implementation.
|
||||
type mockKeyring struct {
|
||||
credentials map[string]string // service name -> credential
|
||||
}
|
||||
|
||||
const mockServiceName = "mock-service-name"
|
||||
|
||||
func newMockKeyring() *mockKeyring {
|
||||
return &mockKeyring{credentials: make(map[string]string)}
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Read(_ *url.URL) (string, error) {
|
||||
cred, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Write(_ *url.URL, token string) error {
|
||||
m.credentials[mockServiceName] = token
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Delete(_ *url.URL) error {
|
||||
_, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
delete(m.credentials, mockServiceName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestUseKeyring(t *testing.T) {
|
||||
// Verify that the --use-keyring flag opts into using a keyring backend for
|
||||
// storing session tokens instead of plain text files.
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Login", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Create CLI invocation with --use-keyring flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend before running the command
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
// Run login in background
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Provide the token when prompted
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring instead)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring")
|
||||
|
||||
// Verify that the credential IS stored in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in mock keyring")
|
||||
require.Equal(t, client.SessionToken(), cred, "stored token should match login token")
|
||||
})
|
||||
|
||||
t.Run("Logout", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First, login with --use-keyring
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend
|
||||
var loginRoot cli.RootCmd
|
||||
loginCmd, err := loginRoot.Command(loginRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
loginRoot.WithSessionStorageBackend(mockBackend)
|
||||
loginInv.Command = loginCmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify credential exists in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "read credential should succeed before logout")
|
||||
require.NotEmpty(t, cred, "credential should exist after logout")
|
||||
|
||||
// Now run logout with --use-keyring
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
// Inject the same mock backend
|
||||
var logoutRoot cli.RootCmd
|
||||
logoutCmd, err := logoutRoot.Command(logoutRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
logoutRoot.WithSessionStorageBackend(mockBackend)
|
||||
logoutInv.Command = logoutCmd
|
||||
|
||||
var logoutOut bytes.Buffer
|
||||
logoutInv.Stdout = &logoutOut
|
||||
|
||||
err = logoutInv.Run()
|
||||
require.NoError(t, err, "logout should succeed")
|
||||
|
||||
// Verify the credential was deleted from mock keyring
|
||||
_, err = mockBackend.Read(nil)
|
||||
require.ErrorIs(t, err, os.ErrNotExist, "credential should be deleted from keyring after logout")
|
||||
})
|
||||
|
||||
t.Run("OmitFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// --use-keyring flag omitted (should use file-based storage)
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file WAS created (not using keyring)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err := os.Stat(sessionFile)
|
||||
require.NoError(t, err, "session file should exist when NOT using --use-keyring")
|
||||
|
||||
// Read and verify the token from file
|
||||
content, err := os.ReadFile(sessionFile)
|
||||
require.NoError(t, err, "should be able to read session file")
|
||||
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
|
||||
})
|
||||
|
||||
t.Run("EnvironmentVariable", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Login using CODER_USE_KEYRING environment variable instead of flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Environ.Set("CODER_USE_KEYRING", "true")
|
||||
|
||||
// Inject the mock backend
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring via env var)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring via env var")
|
||||
|
||||
// Verify credential is in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in keyring when CODER_USE_KEYRING=true")
|
||||
require.NotEmpty(t, cred)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUseKeyringUnsupportedOS(t *testing.T) {
|
||||
// Verify that trying to use --use-keyring on an unsupported operating system produces
|
||||
// a helpful error message.
|
||||
t.Parallel()
|
||||
|
||||
// Skip on Windows since the keyring is actually supported.
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Skipping unsupported OS test on Windows where keyring is supported")
|
||||
}
|
||||
|
||||
const expMessage = "keyring storage is not supported on this operating system; remove the --use-keyring flag"
|
||||
|
||||
t.Run("LoginWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Try to login with --use-keyring on an unsupported OS
|
||||
inv, _ := clitest.New(t,
|
||||
"login",
|
||||
"--use-keyring",
|
||||
client.URL.String(),
|
||||
)
|
||||
|
||||
// The error should occur immediately, before any prompts
|
||||
loginErr := inv.Run()
|
||||
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, loginErr)
|
||||
require.Contains(t, loginErr.Error(), expMessage)
|
||||
})
|
||||
|
||||
t.Run("LogoutWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First login without keyring to create a session
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Now try to logout with --use-keyring on an unsupported OS
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
err := logoutInv.Run()
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), expMessage)
|
||||
})
|
||||
}
|
||||
+24
-5
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/coderd/userpassword"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
@@ -114,9 +115,11 @@ func (r *RootCmd) loginWithPassword(
|
||||
}
|
||||
|
||||
sessionToken := resp.SessionToken
|
||||
config := r.createConfig()
|
||||
err = config.Session().Write(sessionToken)
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
|
||||
@@ -149,11 +152,15 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
useTokenForSession bool
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Long: "By default, the session token is stored in a plain text file. Use the " +
|
||||
"--use-keyring flag or set CODER_USE_KEYRING=true to store the token in " +
|
||||
"the operating system keyring instead.",
|
||||
Middleware: serpent.RequireRangeArgs(0, 1),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
rawURL := ""
|
||||
var urlSource string
|
||||
|
||||
@@ -198,6 +205,15 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check keyring availability before prompting the user for a token to fail fast.
|
||||
if r.useKeyring {
|
||||
backend := r.ensureTokenBackend()
|
||||
_, err := backend.Read(client.URL)
|
||||
if err != nil && xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
}
|
||||
|
||||
hasFirstUser, err := client.HasFirstUser(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to check server %q for first user, is the URL correct and is coder accessible from your browser? Error - has initial user: %w", serverURL.String(), err)
|
||||
@@ -394,8 +410,11 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
}
|
||||
|
||||
config := r.createConfig()
|
||||
err = config.Session().Write(sessionToken)
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
err = config.URL().Write(serverURL.String())
|
||||
|
||||
+8
-3
@@ -8,6 +8,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
@@ -46,11 +47,15 @@ func (r *RootCmd) logout() *serpent.Command {
|
||||
errors = append(errors, xerrors.Errorf("remove URL file: %w", err))
|
||||
}
|
||||
|
||||
err = config.Session().Delete()
|
||||
err = r.ensureTokenBackend().Delete(client.URL)
|
||||
// Only throw error if the session configuration file is present,
|
||||
// otherwise the user is already logged out, and we proceed
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, xerrors.Errorf("remove session file: %w", err))
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
errors = append(errors, errKeyringNotSupported)
|
||||
} else {
|
||||
errors = append(errors, xerrors.Errorf("remove session token: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
err = config.Organization().Delete()
|
||||
|
||||
+53
-10
@@ -37,6 +37,7 @@ import (
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/gitauth"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/cli/telemetry"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -54,6 +55,8 @@ var (
|
||||
// ErrSilent is a sentinel error that tells the command handler to just exit with a non-zero error, but not print
|
||||
// anything.
|
||||
ErrSilent = xerrors.New("silent error")
|
||||
|
||||
errKeyringNotSupported = xerrors.New("keyring storage is not supported on this operating system; remove the --use-keyring flag to use file-based storage")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,12 +71,14 @@ const (
|
||||
varVerbose = "verbose"
|
||||
varDisableDirect = "disable-direct-connections"
|
||||
varDisableNetworkTelemetry = "disable-network-telemetry"
|
||||
varUseKeyring = "use-keyring"
|
||||
|
||||
notLoggedInMessage = "You are not logged in. Try logging in using '%s login <url>'."
|
||||
|
||||
envNoVersionCheck = "CODER_NO_VERSION_WARNING"
|
||||
envNoFeatureWarning = "CODER_NO_FEATURE_WARNING"
|
||||
envSessionToken = "CODER_SESSION_TOKEN"
|
||||
envUseKeyring = "CODER_USE_KEYRING"
|
||||
//nolint:gosec
|
||||
envAgentToken = "CODER_AGENT_TOKEN"
|
||||
//nolint:gosec
|
||||
@@ -474,6 +479,15 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
Value: serpent.BoolOf(&r.disableNetworkTelemetry),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: varUseKeyring,
|
||||
Env: envUseKeyring,
|
||||
Description: "Store and retrieve session tokens using the operating system " +
|
||||
"keyring. Currently only supported on Windows. By default, tokens are " +
|
||||
"stored in plain text files.",
|
||||
Value: serpent.BoolOf(&r.useKeyring),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: "debug-http",
|
||||
Description: "Debug codersdk HTTP requests.",
|
||||
@@ -508,6 +522,7 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
type RootCmd struct {
|
||||
clientURL *url.URL
|
||||
token string
|
||||
tokenBackend sessionstore.Backend
|
||||
globalConfig string
|
||||
header []string
|
||||
headerCommand string
|
||||
@@ -522,6 +537,7 @@ type RootCmd struct {
|
||||
disableNetworkTelemetry bool
|
||||
noVersionCheck bool
|
||||
noFeatureWarning bool
|
||||
useKeyring bool
|
||||
}
|
||||
|
||||
// InitClient creates and configures a new client with authentication, telemetry,
|
||||
@@ -549,14 +565,19 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
r.token, err = conf.Session().Read()
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Configure HTTP client with transport wrappers
|
||||
@@ -588,7 +609,6 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
// This allows commands to run without requiring authentication, but still use auth if available.
|
||||
func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, error) {
|
||||
conf := r.createConfig()
|
||||
var err error
|
||||
// Read the client URL stored on disk.
|
||||
if r.clientURL == nil || r.clientURL.String() == "" {
|
||||
rawURL, err := conf.URL().Read()
|
||||
@@ -605,14 +625,19 @@ func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, erro
|
||||
}
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
r.token, err = conf.Session().Read()
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Only configure the client if we have a URL
|
||||
@@ -688,6 +713,24 @@ func (r *RootCmd) createUnauthenticatedClient(ctx context.Context, serverURL *ur
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// ensureTokenBackend returns the session token storage backend, creating it if necessary.
|
||||
// This must be called after flags are parsed so we can respect the value of the --use-keyring
|
||||
// flag.
|
||||
func (r *RootCmd) ensureTokenBackend() sessionstore.Backend {
|
||||
if r.tokenBackend == nil {
|
||||
if r.useKeyring {
|
||||
r.tokenBackend = sessionstore.NewKeyring()
|
||||
} else {
|
||||
r.tokenBackend = sessionstore.NewFile(r.createConfig)
|
||||
}
|
||||
}
|
||||
return r.tokenBackend
|
||||
}
|
||||
|
||||
func (r *RootCmd) WithSessionStorageBackend(backend sessionstore.Backend) {
|
||||
r.tokenBackend = backend
|
||||
}
|
||||
|
||||
type AgentAuth struct {
|
||||
// Agent Client config
|
||||
agentToken string
|
||||
@@ -1323,7 +1366,7 @@ func defaultUpgradeMessage(version string) string {
|
||||
// to the GitHub release page to download the latest installer.
|
||||
version = strings.TrimPrefix(version, "v")
|
||||
if runtime.GOOS == "windows" {
|
||||
return fmt.Sprintf("download the server version from: https://github.com/coder/coder/releases/v%s", version)
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("download the server version with: 'curl -L https://coder.com/install.sh | sh -s -- --version %s'", version)
|
||||
}
|
||||
@@ -1369,8 +1412,8 @@ func wrapTransportWithVersionMismatchCheck(rt http.RoundTripper, inv *serpent.In
|
||||
switch {
|
||||
case serverInfo.UpgradeMessage != "":
|
||||
upgradeMessage = serverInfo.UpgradeMessage
|
||||
// The site-local `install.sh` was introduced in v2.19.0
|
||||
case serverInfo.DashboardURL != "" && semver.Compare(semver.MajorMinor(serverVersion), "v2.19") >= 0:
|
||||
// The site-local `install.sh` was introduced in v2.19.0. Skip curl instruction on Windows.
|
||||
case runtime.GOOS != "windows" && serverInfo.DashboardURL != "" && semver.Compare(semver.MajorMinor(serverVersion), "v2.19") >= 0:
|
||||
upgradeMessage = fmt.Sprintf("download %s with: 'curl -fsSL %s/install.sh | sh'", serverVersion, serverInfo.DashboardURL)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,239 @@
|
||||
// Package sessionstore provides CLI session token storage mechanisms.
|
||||
// Operating system keyring storage is intended to have compatibility with other Coder
|
||||
// applications (e.g. Coder Desktop, Coder provider for JetBrains Toolbox, etc) so that
|
||||
// applications can read/write the same credential stored in the keyring.
|
||||
//
|
||||
// Note that we aren't using an existing Go package zalando/go-keyring here for a few
|
||||
// reasons. 1) It prescribes the format of the target credential name in the OS keyrings,
|
||||
// which makes our life difficult for compatibility with other Coder applications. 2)
|
||||
// It uses init functions that make it difficult to test with. As a result, the OS
|
||||
// keyring implementations may be adapted from zalando/go-keyring source (i.e. Windows).
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
)
|
||||
|
||||
// Backend is a storage backend for session tokens.
|
||||
type Backend interface {
|
||||
// Read returns the session token for the given server URL or an error, if any. It
|
||||
// will return os.ErrNotExist if no token exists for the given URL.
|
||||
Read(serverURL *url.URL) (string, error)
|
||||
// Write stores the session token for the given server URL.
|
||||
Write(serverURL *url.URL, token string) error
|
||||
// Delete removes the session token for the given server URL or an error, if any.
|
||||
// It will return os.ErrNotExist error if no token exists to delete.
|
||||
Delete(serverURL *url.URL) error
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
// ErrSetDataTooBig is returned if `keyringProvider.Set` was called with too much data.
|
||||
// On macOS: The combination of service, username & password should not exceed ~3000 bytes
|
||||
// On Windows: The service is limited to 32KiB while the password is limited to 2560 bytes
|
||||
ErrSetDataTooBig = xerrors.New("data passed to Set was too big")
|
||||
|
||||
// ErrNotImplemented represents when keyring usage is not implemented on the current
|
||||
// operating system.
|
||||
ErrNotImplemented = xerrors.New("not implemented")
|
||||
)
|
||||
|
||||
// keyringProvider represents an operating system keyring. The expectation
|
||||
// is these methods operate on the user/login keyring.
|
||||
type keyringProvider interface {
|
||||
// Set stores the given credential for a service name in the operating system
|
||||
// keyring.
|
||||
Set(service, credential string) error
|
||||
// Get retrieves the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Get(service string) ([]byte, error)
|
||||
// Delete deletes the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Delete(service string) error
|
||||
}
|
||||
|
||||
// credential represents a single credential entry.
|
||||
type credential struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
|
||||
// credentialsMap represents the JSON structure stored in the operating system keyring.
|
||||
// It supports storing multiple credentials for different server URLs.
|
||||
type credentialsMap map[string]credential
|
||||
|
||||
// normalizeHost returns a normalized version of the URL host for use as a map key.
|
||||
func normalizeHost(u *url.URL) (string, error) {
|
||||
if u == nil || u.Host == "" {
|
||||
return "", xerrors.New("nil server URL")
|
||||
}
|
||||
return strings.TrimSpace(strings.ToLower(u.Host)), nil
|
||||
}
|
||||
|
||||
// parseCredentialsJSON parses the JSON from the keyring into a credentialsMap.
|
||||
func parseCredentialsJSON(jsonData []byte) (credentialsMap, error) {
|
||||
if len(jsonData) == 0 {
|
||||
return make(credentialsMap), nil
|
||||
}
|
||||
|
||||
var creds credentialsMap
|
||||
if err := json.Unmarshal(jsonData, &creds); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// Keyring is a Backend that exclusively stores the session token in the operating
|
||||
// system keyring. Happy path usage of this type should start with NewKeyring.
|
||||
// It stores a JSON object in the keyring that supports multiple credentials for
|
||||
// different server URLs, providing compatibility with Coder Desktop and other Coder
|
||||
// applications.
|
||||
type Keyring struct {
|
||||
provider keyringProvider
|
||||
serviceName string
|
||||
}
|
||||
|
||||
// NewKeyring creates a Keyring with the default service name for production use.
|
||||
func NewKeyring() Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: defaultServiceName,
|
||||
}
|
||||
}
|
||||
|
||||
// NewKeyringWithService creates a Keyring Backend that stores credentials under the
|
||||
// specified service name. This is primarily intended for testing to avoid conflicts
|
||||
// with production credentials and collisions between tests.
|
||||
func NewKeyringWithService(serviceName string) Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (o Keyring) Read(serverURL *url.URL) (string, error) {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
credJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(credJSON) == 0 {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(credJSON)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("read: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Return the credential for the specified URL
|
||||
cred, ok := creds[host]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred.APIToken, nil
|
||||
}
|
||||
|
||||
func (o Keyring) Write(serverURL *url.URL, token string) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return xerrors.Errorf("read existing credentials: %w", err)
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Upsert the credential for this URL.
|
||||
creds[host] = credential{
|
||||
CoderURL: host,
|
||||
APIToken: token,
|
||||
}
|
||||
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
err = o.provider.Set(o.serviceName, string(credsJSON))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write credentials to keyring: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o Keyring) Delete(serverURL *url.URL) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
if _, ok := creds[host]; !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
delete(creds, host)
|
||||
|
||||
// Delete the entire keyring entry when no credentials remain.
|
||||
if len(creds) == 0 {
|
||||
return o.provider.Delete(o.serviceName)
|
||||
}
|
||||
|
||||
// Write back the updated credentials map.
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return o.provider.Set(o.serviceName, string(credsJSON))
|
||||
}
|
||||
|
||||
// File is a Backend that exclusively stores the session token in a file on disk.
|
||||
type File struct {
|
||||
config func() config.Root
|
||||
}
|
||||
|
||||
func NewFile(f func() config.Root) *File {
|
||||
return &File{config: f}
|
||||
}
|
||||
|
||||
func (f *File) Read(_ *url.URL) (string, error) {
|
||||
return f.config().Session().Read()
|
||||
}
|
||||
|
||||
func (f *File) Write(_ *url.URL, token string) error {
|
||||
return f.config().Session().Write(token)
|
||||
}
|
||||
|
||||
func (f *File) Delete(_ *url.URL) error {
|
||||
return f.config().Session().Delete()
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNormalizeHost(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
url *url.URL
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "StandardHost",
|
||||
url: &url.URL{Host: "coder.example.com"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithPort",
|
||||
url: &url.URL{Host: "coder.example.com:8080"},
|
||||
want: "coder.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "UppercaseHost",
|
||||
url: &url.URL{Host: "CODER.EXAMPLE.COM"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithWhitespace",
|
||||
url: &url.URL{Host: " coder.example.com "},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "NilURL",
|
||||
url: nil,
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "EmptyHost",
|
||||
url: &url.URL{Host: ""},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := normalizeHost(tt.url)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCredentialsJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Empty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
creds, err := parseCredentialsJSON(nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, creds)
|
||||
require.Empty(t, creds)
|
||||
})
|
||||
|
||||
t.Run("NewFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{
|
||||
"coder1.example.com": {"coder_url": "coder1.example.com", "api_token": "token1"},
|
||||
"coder2.example.com": {"coder_url": "coder2.example.com", "api_token": "token2"}
|
||||
}`)
|
||||
creds, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, creds, 2)
|
||||
require.Equal(t, "token1", creds["coder1.example.com"].APIToken)
|
||||
require.Equal(t, "token2", creds["coder2.example.com"].APIToken)
|
||||
})
|
||||
|
||||
t.Run("InvalidJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{invalid json}`)
|
||||
_, err := parseCredentialsJSON(jsonData)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCredentialsMap_RoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
creds := credentialsMap{
|
||||
"coder1.example.com": {
|
||||
CoderURL: "coder1.example.com",
|
||||
APIToken: "token1",
|
||||
},
|
||||
"coder2.example.com:8080": {
|
||||
CoderURL: "coder2.example.com:8080",
|
||||
APIToken: "token2",
|
||||
},
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
parsed, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, creds, parsed)
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
//go:build !windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
const defaultServiceName = "not-implemented"
|
||||
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(_, _ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(_ string) ([]byte, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(_ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
@@ -0,0 +1,342 @@
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
// Generate a test service name for use with the OS keyring. It uses a combination
|
||||
// of the test name and a nanosecond timestamp to prevent collisions.
|
||||
func keyringTestServiceName(t *testing.T) string {
|
||||
t.Helper()
|
||||
return t.Name() + "_" + fmt.Sprintf("%v", time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func TestKeyring(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("linux and darwin are not supported yet")
|
||||
}
|
||||
|
||||
// This test exercises use of the operating system keyring. As a result,
|
||||
// the operating system keyring is expected to be available.
|
||||
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist when reading non-existent token")
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected os.ErrNotExist when deleting non-existent token")
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
dir := t.TempDir()
|
||||
expSessionFile := path.Join(dir, "session")
|
||||
|
||||
const inputToken = "test-keyring-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify no session file was created (keyring stores in OS keyring, not file)
|
||||
_, err = os.Stat(expSessionFile)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected session token file to not exist when using keyring")
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
const inputToken = "test-keyring-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist after deleting token")
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-keyring-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-keyring-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("MultipleServers", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srvURL)
|
||||
_ = backend.Delete(srvURL2)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-for-server-1"
|
||||
err = backend.Write(srvURL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1)
|
||||
const token2 = "token-for-server-2"
|
||||
err = backend.Write(srvURL2, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read server 1's credential
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
// Read server 2's credential
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Delete server 1's credential
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify server 1's credential is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// Verify server 2's credential still exists
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Clean up remaining credentials
|
||||
err = backend.Delete(srvURL2)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Parallel()
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the session file was created
|
||||
sessionFile := config.Root(dir).Session()
|
||||
require.True(t, sessionFile.Exists())
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token was written
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Attempt to delete a non-existent token
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteIgnoresURL", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with first URL test token
|
||||
const firstToken = "token-for-url1"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with second URL - should overwrite
|
||||
const secondToken = "token-for-url2"
|
||||
err = backend.Write(srvURL2, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have the second token (File backend doesn't differentiate by URL)
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultServiceName is the service name used in the Windows Credential Manager
|
||||
// for storing Coder CLI session tokens.
|
||||
defaultServiceName = "coder-v2-credentials"
|
||||
)
|
||||
|
||||
// operatingSystemKeyring implements keyringProvider and uses Windows Credential Manager.
|
||||
// It is largely adapted from the zalando/go-keyring package.
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(service, credential string) error {
|
||||
// password may not exceed 2560 bytes (https://github.com/jaraco/keyring/issues/540#issuecomment-968329967)
|
||||
if len(credential) > 2560 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 512 bytes (might need more testing)
|
||||
if len(service) >= 512 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 32k but problems occur before that
|
||||
// so we limit it to 30k
|
||||
if len(service) > 1024*30 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
cred := wincred.NewGenericCredential(service)
|
||||
cred.CredentialBlob = []byte(credential)
|
||||
cred.Persist = wincred.PersistLocalMachine
|
||||
return cred.Write()
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(service string) ([]byte, error) {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return cred.CredentialBlob, nil
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(service string) error {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
return err
|
||||
}
|
||||
return cred.Delete()
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
func TestWindowsKeyring_WriteReadDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL = "http://127.0.0.1:1337"
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Verify no token exists initially
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the credential is stored in Windows Credential Manager with correct format
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Verify the stored values
|
||||
require.Len(t, storedCreds, 1)
|
||||
cred, ok := storedCreds[srvURL.Host]
|
||||
require.True(t, ok, "credential for URL should exist")
|
||||
require.Equal(t, inputToken, cred.APIToken)
|
||||
require.Equal(t, srvURL.Host, cred.CoderURL)
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify token is deleted
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestWindowsKeyring_MultipleServers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL1 = "http://127.0.0.1:1337"
|
||||
srv1URL, err := url.Parse(testURL1)
|
||||
require.NoError(t, err)
|
||||
|
||||
const testURL2 = "http://127.0.0.1:1338"
|
||||
srv2URL, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srv1URL)
|
||||
_ = backend.Delete(srv2URL)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-server-1"
|
||||
err = backend.Write(srv1URL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1's token)
|
||||
const token2 = "token-server-2"
|
||||
err = backend.Write(srv2URL, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify both credentials are stored in Windows Credential Manager
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Both credentials should exist
|
||||
require.Len(t, storedCreds, 2)
|
||||
require.Equal(t, token1, storedCreds[srv1URL.Host].APIToken)
|
||||
require.Equal(t, token2, storedCreds[srv2URL.Host].APIToken)
|
||||
|
||||
// Read individual credentials
|
||||
token, err := backend.Read(srv1URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
token, err = backend.Read(srv2URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Cleanup
|
||||
err = backend.Delete(srv1URL)
|
||||
require.NoError(t, err)
|
||||
err = backend.Delete(srv2URL)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
+47
@@ -109,6 +109,51 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
}
|
||||
},
|
||||
),
|
||||
CompletionHandler: func(inv *serpent.Invocation) []string {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
res, err := client.Workspaces(inv.Context(), codersdk.WorkspaceFilter{
|
||||
Owner: codersdk.Me,
|
||||
})
|
||||
if err != nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
var mu sync.Mutex
|
||||
var completions []string
|
||||
var wg sync.WaitGroup
|
||||
for _, ws := range res.Workspaces {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
resources, err := client.TemplateVersionResources(inv.Context(), ws.LatestBuild.TemplateVersionID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var agents []codersdk.WorkspaceAgent
|
||||
for _, resource := range resources {
|
||||
agents = append(agents, resource.Agents...)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if len(agents) == 1 {
|
||||
completions = append(completions, ws.Name)
|
||||
} else {
|
||||
for _, agent := range agents {
|
||||
completions = append(completions, fmt.Sprintf("%s.%s", ws.Name, agent.Name))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
slices.Sort(completions)
|
||||
return completions
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) (retErr error) {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
@@ -906,6 +951,8 @@ func GetWorkspaceAndAgent(ctx context.Context, inv *serpent.Invocation, client *
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with active template version: %w", err)
|
||||
}
|
||||
_, _ = fmt.Fprintln(inv.Stdout, "Unable to start the workspace with template version from last build. Your workspace has been updated to the current active template version.")
|
||||
default:
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with current template version: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return codersdk.Workspace{}, codersdk.WorkspaceAgent{}, nil, xerrors.Errorf("start workspace with current template version: %w", err)
|
||||
|
||||
@@ -2447,3 +2447,99 @@ func tempDirUnixSocket(t *testing.T) string {
|
||||
|
||||
return t.TempDir()
|
||||
}
|
||||
|
||||
func TestSSH_Completion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("SingleAgent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
_ = agenttest.New(t, client.URL, agentToken)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, root := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For single-agent workspaces, the only completion should be the
|
||||
// bare workspace name.
|
||||
output := stdout.String()
|
||||
t.Logf("Completion output: %q", output)
|
||||
require.Contains(t, output, workspace.Name)
|
||||
})
|
||||
|
||||
t.Run("MultiAgent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, store := coderdtest.NewWithDatabase(t, nil)
|
||||
first := coderdtest.CreateFirstUser(t, client)
|
||||
userClient, user := coderdtest.CreateAnotherUserMutators(t, client, first.OrganizationID, nil, func(r *codersdk.CreateUserRequestWithOrgs) {
|
||||
r.Username = "multiuser"
|
||||
})
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, store, database.WorkspaceTable{
|
||||
Name: "multiworkspace",
|
||||
OrganizationID: first.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
return []*proto.Agent{
|
||||
{
|
||||
Name: "agent1",
|
||||
Auth: &proto.Agent_Token{},
|
||||
},
|
||||
{
|
||||
Name: "agent2",
|
||||
Auth: &proto.Agent_Token{},
|
||||
},
|
||||
}
|
||||
}).Do()
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, root := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For multi-agent workspaces, completions should include the
|
||||
// workspace.agent format but NOT the bare workspace name.
|
||||
output := stdout.String()
|
||||
t.Logf("Completion output: %q", output)
|
||||
lines := strings.Split(strings.TrimSpace(output), "\n")
|
||||
require.NotContains(t, lines, r.Workspace.Name)
|
||||
require.Contains(t, output, r.Workspace.Name+".agent1")
|
||||
require.Contains(t, output, r.Workspace.Name+".agent2")
|
||||
})
|
||||
|
||||
t.Run("NetworkError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv, _ := clitest.New(t, "ssh", "")
|
||||
inv.Stdout = &stdout
|
||||
inv.Environ.Set("COMPLETION_MODE", "1")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
output := stdout.String()
|
||||
require.Empty(t, output)
|
||||
})
|
||||
}
|
||||
|
||||
Vendored
+5
@@ -108,6 +108,11 @@ variables or flags.
|
||||
--url url, $CODER_URL
|
||||
URL to a deployment.
|
||||
|
||||
--use-keyring bool, $CODER_USE_KEYRING
|
||||
Store and retrieve session tokens using the operating system keyring.
|
||||
Currently only supported on Windows. By default, tokens are stored in
|
||||
plain text files.
|
||||
|
||||
-v, --verbose bool, $CODER_VERBOSE
|
||||
Enable verbose output.
|
||||
|
||||
|
||||
+2
-1
@@ -90,6 +90,7 @@
|
||||
"allow_renames": false,
|
||||
"favorite": false,
|
||||
"next_start_at": "====[timestamp]=====",
|
||||
"is_prebuild": false
|
||||
"is_prebuild": false,
|
||||
"task_id": null
|
||||
}
|
||||
]
|
||||
|
||||
+4
@@ -5,6 +5,10 @@ USAGE:
|
||||
|
||||
Authenticate with Coder deployment
|
||||
|
||||
By default, the session token is stored in a plain text file. Use the
|
||||
--use-keyring flag or set CODER_USE_KEYRING=true to store the token in the
|
||||
operating system keyring instead.
|
||||
|
||||
OPTIONS:
|
||||
--first-user-email string, $CODER_FIRST_USER_EMAIL
|
||||
Specifies an email address to use if creating the first user for the
|
||||
|
||||
+35
@@ -80,6 +80,41 @@ OPTIONS:
|
||||
Periodically check for new releases of Coder and inform the owner. The
|
||||
check is performed once per day.
|
||||
|
||||
AIBRIDGE OPTIONS:
|
||||
--aibridge-anthropic-base-url string, $CODER_AIBRIDGE_ANTHROPIC_BASE_URL (default: https://api.anthropic.com/)
|
||||
The base URL of the Anthropic API.
|
||||
|
||||
--aibridge-anthropic-key string, $CODER_AIBRIDGE_ANTHROPIC_KEY
|
||||
The key to authenticate against the Anthropic API.
|
||||
|
||||
--aibridge-bedrock-access-key string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY
|
||||
The access key to authenticate against the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-access-key-secret string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET
|
||||
The access key secret to use with the access key to authenticate
|
||||
against the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-model string, $CODER_AIBRIDGE_BEDROCK_MODEL (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0)
|
||||
The model to use when making requests to the AWS Bedrock API.
|
||||
|
||||
--aibridge-bedrock-region string, $CODER_AIBRIDGE_BEDROCK_REGION
|
||||
The AWS Bedrock API region.
|
||||
|
||||
--aibridge-bedrock-small-fastmodel string, $CODER_AIBRIDGE_BEDROCK_SMALL_FAST_MODEL (default: global.anthropic.claude-haiku-4-5-20251001-v1:0)
|
||||
The small fast model to use when making requests to the AWS Bedrock
|
||||
API. Claude Code uses Haiku-class models to perform background tasks.
|
||||
See
|
||||
https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
|
||||
|
||||
--aibridge-enabled bool, $CODER_AIBRIDGE_ENABLED (default: false)
|
||||
Whether to start an in-memory aibridged instance.
|
||||
|
||||
--aibridge-openai-base-url string, $CODER_AIBRIDGE_OPENAI_BASE_URL (default: https://api.openai.com/v1/)
|
||||
The base URL of the OpenAI API.
|
||||
|
||||
--aibridge-openai-key string, $CODER_AIBRIDGE_OPENAI_KEY
|
||||
The key to authenticate against the OpenAI API.
|
||||
|
||||
CLIENT OPTIONS:
|
||||
These options change the behavior of how clients interact with the Coder.
|
||||
Clients include the Coder CLI, Coder Desktop, IDE extensions, and the web UI.
|
||||
|
||||
+5
@@ -16,6 +16,10 @@ USAGE:
|
||||
|
||||
$ coder tokens ls
|
||||
|
||||
- Create a scoped token:
|
||||
|
||||
$ coder tokens create --scope workspace:read --allow workspace:<uuid>
|
||||
|
||||
- Remove a token by ID:
|
||||
|
||||
$ coder tokens rm WuoWs4ZsMX
|
||||
@@ -24,6 +28,7 @@ SUBCOMMANDS:
|
||||
create Create a token
|
||||
list List tokens
|
||||
remove Delete a token
|
||||
view Display detailed information about a token
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+9
-1
@@ -6,12 +6,20 @@ USAGE:
|
||||
Create a token
|
||||
|
||||
OPTIONS:
|
||||
--allow allow-list
|
||||
Repeatable allow-list entry (<type>:<uuid>, e.g. workspace:1234-...).
|
||||
|
||||
--lifetime string, $CODER_TOKEN_LIFETIME
|
||||
Specify a duration for the lifetime of the token.
|
||||
Duration for the token lifetime. Supports standard Go duration units
|
||||
(ns, us, ms, s, m, h) plus d (days) and y (years). Examples: 8h, 30d,
|
||||
1y, 1d12h30m.
|
||||
|
||||
-n, --name string, $CODER_TOKEN_NAME
|
||||
Specify a human-readable name.
|
||||
|
||||
--scope string-array
|
||||
Repeatable scope to attach to the token (e.g. workspace:read).
|
||||
|
||||
-u, --user string, $CODER_TOKEN_USER
|
||||
Specify the user to create the token for (Only works if logged in user
|
||||
is admin).
|
||||
|
||||
+1
-1
@@ -12,7 +12,7 @@ OPTIONS:
|
||||
Specifies whether all users' tokens will be listed or not (must have
|
||||
Owner role to see all tokens).
|
||||
|
||||
-c, --column [id|name|last used|expires at|created at|owner] (default: id,name,last used,expires at,created at)
|
||||
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
|
||||
+16
@@ -0,0 +1,16 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder tokens view [flags] <name|id>
|
||||
|
||||
Display detailed information about a token
|
||||
|
||||
OPTIONS:
|
||||
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at,owner)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
+21
-4
@@ -714,8 +714,7 @@ workspace_prebuilds:
|
||||
# (default: 3, type: int)
|
||||
failure_hard_limit: 3
|
||||
aibridge:
|
||||
# Whether to start an in-memory aibridged instance ("aibridge" experiment must be
|
||||
# enabled, too).
|
||||
# Whether to start an in-memory aibridged instance.
|
||||
# (default: false, type: bool)
|
||||
enabled: false
|
||||
# The base URL of the OpenAI API.
|
||||
@@ -726,7 +725,25 @@ aibridge:
|
||||
openai_key: ""
|
||||
# The base URL of the Anthropic API.
|
||||
# (default: https://api.anthropic.com/, type: string)
|
||||
base_url: https://api.anthropic.com/
|
||||
anthropic_base_url: https://api.anthropic.com/
|
||||
# The key to authenticate against the Anthropic API.
|
||||
# (default: <unset>, type: string)
|
||||
key: ""
|
||||
anthropic_key: ""
|
||||
# The AWS Bedrock API region.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_region: ""
|
||||
# The access key to authenticate against the AWS Bedrock API.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_access_key: ""
|
||||
# The access key secret to use with the access key to authenticate against the AWS
|
||||
# Bedrock API.
|
||||
# (default: <unset>, type: string)
|
||||
bedrock_access_key_secret: ""
|
||||
# The model to use when making requests to the AWS Bedrock API.
|
||||
# (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0, type: string)
|
||||
bedrock_model: global.anthropic.claude-sonnet-4-5-20250929-v1:0
|
||||
# The small fast model to use when making requests to the AWS Bedrock API. Claude
|
||||
# Code uses Haiku-class models to perform background tasks. See
|
||||
# https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
|
||||
# (default: global.anthropic.claude-haiku-4-5-20251001-v1:0, type: string)
|
||||
bedrock_small_fast_model: global.anthropic.claude-haiku-4-5-20251001-v1:0
|
||||
|
||||
+104
-6
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
@@ -27,6 +29,10 @@ func (r *RootCmd) tokens() *serpent.Command {
|
||||
Description: "List your tokens",
|
||||
Command: "coder tokens ls",
|
||||
},
|
||||
Example{
|
||||
Description: "Create a scoped token",
|
||||
Command: "coder tokens create --scope workspace:read --allow workspace:<uuid>",
|
||||
},
|
||||
Example{
|
||||
Description: "Remove a token by ID",
|
||||
Command: "coder tokens rm WuoWs4ZsMX",
|
||||
@@ -39,6 +45,7 @@ func (r *RootCmd) tokens() *serpent.Command {
|
||||
Children: []*serpent.Command{
|
||||
r.createToken(),
|
||||
r.listTokens(),
|
||||
r.viewToken(),
|
||||
r.removeToken(),
|
||||
},
|
||||
}
|
||||
@@ -50,6 +57,8 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
tokenLifetime string
|
||||
name string
|
||||
user string
|
||||
scopes []string
|
||||
allowList []codersdk.APIAllowListTarget
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "create",
|
||||
@@ -88,10 +97,18 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
res, err := client.CreateToken(inv.Context(), userID, codersdk.CreateTokenRequest{
|
||||
req := codersdk.CreateTokenRequest{
|
||||
Lifetime: parsedLifetime,
|
||||
TokenName: name,
|
||||
})
|
||||
}
|
||||
if len(req.Scopes) == 0 {
|
||||
req.Scopes = slice.StringEnums[codersdk.APIKeyScope](scopes)
|
||||
}
|
||||
if len(allowList) > 0 {
|
||||
req.AllowList = append([]codersdk.APIAllowListTarget(nil), allowList...)
|
||||
}
|
||||
|
||||
res, err := client.CreateToken(inv.Context(), userID, req)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tokens: %w", err)
|
||||
}
|
||||
@@ -106,7 +123,7 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
{
|
||||
Flag: "lifetime",
|
||||
Env: "CODER_TOKEN_LIFETIME",
|
||||
Description: "Specify a duration for the lifetime of the token.",
|
||||
Description: "Duration for the token lifetime. Supports standard Go duration units (ns, us, ms, s, m, h) plus d (days) and y (years). Examples: 8h, 30d, 1y, 1d12h30m.",
|
||||
Value: serpent.StringOf(&tokenLifetime),
|
||||
},
|
||||
{
|
||||
@@ -123,6 +140,16 @@ func (r *RootCmd) createToken() *serpent.Command {
|
||||
Description: "Specify the user to create the token for (Only works if logged in user is admin).",
|
||||
Value: serpent.StringOf(&user),
|
||||
},
|
||||
{
|
||||
Flag: "scope",
|
||||
Description: "Repeatable scope to attach to the token (e.g. workspace:read).",
|
||||
Value: serpent.StringArrayOf(&scopes),
|
||||
},
|
||||
{
|
||||
Flag: "allow",
|
||||
Description: "Repeatable allow-list entry (<type>:<uuid>, e.g. workspace:1234-...).",
|
||||
Value: AllowListFlagOf(&allowList),
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
@@ -136,6 +163,8 @@ type tokenListRow struct {
|
||||
// For table format:
|
||||
ID string `json:"-" table:"id,default_sort"`
|
||||
TokenName string `json:"token_name" table:"name"`
|
||||
Scopes string `json:"-" table:"scopes"`
|
||||
Allow string `json:"-" table:"allow list"`
|
||||
LastUsed time.Time `json:"-" table:"last used"`
|
||||
ExpiresAt time.Time `json:"-" table:"expires at"`
|
||||
CreatedAt time.Time `json:"-" table:"created at"`
|
||||
@@ -143,20 +172,47 @@ type tokenListRow struct {
|
||||
}
|
||||
|
||||
func tokenListRowFromToken(token codersdk.APIKeyWithOwner) tokenListRow {
|
||||
return tokenListRowFromKey(token.APIKey, token.Username)
|
||||
}
|
||||
|
||||
func tokenListRowFromKey(token codersdk.APIKey, owner string) tokenListRow {
|
||||
return tokenListRow{
|
||||
APIKey: token.APIKey,
|
||||
APIKey: token,
|
||||
ID: token.ID,
|
||||
TokenName: token.TokenName,
|
||||
Scopes: joinScopes(token.Scopes),
|
||||
Allow: joinAllowList(token.AllowList),
|
||||
LastUsed: token.LastUsed,
|
||||
ExpiresAt: token.ExpiresAt,
|
||||
CreatedAt: token.CreatedAt,
|
||||
Owner: token.Username,
|
||||
Owner: owner,
|
||||
}
|
||||
}
|
||||
|
||||
func joinScopes(scopes []codersdk.APIKeyScope) string {
|
||||
if len(scopes) == 0 {
|
||||
return ""
|
||||
}
|
||||
vals := slice.ToStrings(scopes)
|
||||
sort.Strings(vals)
|
||||
return strings.Join(vals, ", ")
|
||||
}
|
||||
|
||||
func joinAllowList(entries []codersdk.APIAllowListTarget) string {
|
||||
if len(entries) == 0 {
|
||||
return ""
|
||||
}
|
||||
vals := make([]string, len(entries))
|
||||
for i, entry := range entries {
|
||||
vals[i] = entry.String()
|
||||
}
|
||||
sort.Strings(vals)
|
||||
return strings.Join(vals, ", ")
|
||||
}
|
||||
|
||||
func (r *RootCmd) listTokens() *serpent.Command {
|
||||
// we only display the 'owner' column if the --all argument is passed in
|
||||
defaultCols := []string{"id", "name", "last used", "expires at", "created at"}
|
||||
defaultCols := []string{"id", "name", "scopes", "allow list", "last used", "expires at", "created at"}
|
||||
if slices.Contains(os.Args, "-a") || slices.Contains(os.Args, "--all") {
|
||||
defaultCols = append(defaultCols, "owner")
|
||||
}
|
||||
@@ -226,6 +282,48 @@ func (r *RootCmd) listTokens() *serpent.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) viewToken() *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]tokenListRow{}, []string{"id", "name", "scopes", "allow list", "last used", "expires at", "created at", "owner"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "view <name|id>",
|
||||
Short: "Display detailed information about a token",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tokenName := inv.Args[0]
|
||||
token, err := client.APIKeyByName(inv.Context(), codersdk.Me, tokenName)
|
||||
if err != nil {
|
||||
maybeID := strings.Split(tokenName, "-")[0]
|
||||
token, err = client.APIKeyByID(inv.Context(), codersdk.Me, maybeID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch api key by name or id: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
row := tokenListRowFromKey(*token, "")
|
||||
out, err := formatter.Format(inv.Context(), []tokenListRow{row})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(inv.Stdout, out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) removeToken() *serpent.Command {
|
||||
cmd := &serpent.Command{
|
||||
Use: "remove <name|id|token>",
|
||||
|
||||
+56
-3
@@ -4,10 +4,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -46,6 +49,18 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
id := res[:10]
|
||||
|
||||
allowWorkspaceID := uuid.New()
|
||||
allowSpec := fmt.Sprintf("workspace:%s", allowWorkspaceID.String())
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "scoped-token", "--scope", string(codersdk.APIKeyScopeWorkspaceRead), "--allow", allowSpec)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
scopedTokenID := res[:10]
|
||||
|
||||
// Test creating a token for second user from first user's (admin) session
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "token-two", "--user", secondUser.ID.String())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -67,7 +82,7 @@ func TestTokens(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
// Result should only contain the token created for the admin user
|
||||
// Result should only contain the tokens created for the admin user
|
||||
require.Contains(t, res, "ID")
|
||||
require.Contains(t, res, "EXPIRES AT")
|
||||
require.Contains(t, res, "CREATED AT")
|
||||
@@ -76,6 +91,16 @@ func TestTokens(t *testing.T) {
|
||||
// Result should not contain the token created for the second user
|
||||
require.NotContains(t, res, secondTokenID)
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "view", "scoped-token")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.Contains(t, res, string(codersdk.APIKeyScopeWorkspaceRead))
|
||||
require.Contains(t, res, allowSpec)
|
||||
|
||||
// Test listing tokens from the second user's session
|
||||
inv, root = clitest.New(t, "tokens", "ls")
|
||||
clitest.SetupConfig(t, secondUserClient, root)
|
||||
@@ -101,6 +126,14 @@ func TestTokens(t *testing.T) {
|
||||
// User (non-admin) should not be able to create a token for another user
|
||||
require.Error(t, err)
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "invalid-allow", "--allow", "badvalue")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "invalid allow_list entry")
|
||||
|
||||
inv, root = clitest.New(t, "tokens", "ls", "--output=json")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
@@ -110,8 +143,17 @@ func TestTokens(t *testing.T) {
|
||||
|
||||
var tokens []codersdk.APIKey
|
||||
require.NoError(t, json.Unmarshal(buf.Bytes(), &tokens))
|
||||
require.Len(t, tokens, 1)
|
||||
require.Equal(t, id, tokens[0].ID)
|
||||
require.Len(t, tokens, 2)
|
||||
tokenByName := make(map[string]codersdk.APIKey, len(tokens))
|
||||
for _, tk := range tokens {
|
||||
tokenByName[tk.TokenName] = tk
|
||||
}
|
||||
require.Contains(t, tokenByName, "token-one")
|
||||
require.Contains(t, tokenByName, "scoped-token")
|
||||
scopedToken := tokenByName["scoped-token"]
|
||||
require.Contains(t, scopedToken.Scopes, codersdk.APIKeyScopeWorkspaceRead)
|
||||
require.Len(t, scopedToken.AllowList, 1)
|
||||
require.Equal(t, allowSpec, scopedToken.AllowList[0].String())
|
||||
|
||||
// Delete by name
|
||||
inv, root = clitest.New(t, "tokens", "rm", "token-one")
|
||||
@@ -135,6 +177,17 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
|
||||
// Delete scoped token by ID
|
||||
inv, root = clitest.New(t, "tokens", "rm", scopedTokenID)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
|
||||
// Create third token
|
||||
inv, root = clitest.New(t, "tokens", "create", "--name", "token-three")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
@@ -239,6 +239,10 @@ func (a *API) Serve(ctx context.Context, l net.Listener) error {
|
||||
return xerrors.Errorf("create agent API server: %w", err)
|
||||
}
|
||||
|
||||
if err := a.ResourcesMonitoringAPI.InitMonitors(ctx); err != nil {
|
||||
return xerrors.Errorf("initialize resource monitoring: %w", err)
|
||||
}
|
||||
|
||||
return server.Serve(ctx, l)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@@ -33,42 +34,60 @@ type ResourcesMonitoringAPI struct {
|
||||
|
||||
Debounce time.Duration
|
||||
Config resourcesmonitor.Config
|
||||
|
||||
// Cache resource monitors on first call to avoid millions of DB queries per day.
|
||||
memoryMonitor database.WorkspaceAgentMemoryResourceMonitor
|
||||
volumeMonitors []database.WorkspaceAgentVolumeResourceMonitor
|
||||
monitorsLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context.Context, _ *proto.GetResourcesMonitoringConfigurationRequest) (*proto.GetResourcesMonitoringConfigurationResponse, error) {
|
||||
memoryMonitor, memoryErr := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if memoryErr != nil && !errors.Is(memoryErr, sql.ErrNoRows) {
|
||||
return nil, xerrors.Errorf("failed to fetch memory resource monitor: %w", memoryErr)
|
||||
// InitMonitors fetches resource monitors from the database and caches them.
|
||||
// This must be called once after creating a ResourcesMonitoringAPI, the context should be
|
||||
// the agent per-RPC connection context. If fetching fails with a real error (not sql.ErrNoRows), the
|
||||
// connection should be torn down.
|
||||
func (a *ResourcesMonitoringAPI) InitMonitors(ctx context.Context) error {
|
||||
memMon, err := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return xerrors.Errorf("fetch memory resource monitor: %w", err)
|
||||
}
|
||||
// If sql.ErrNoRows, memoryMonitor stays as zero value (CreatedAt.IsZero() = true).
|
||||
// Otherwise, store the fetched monitor.
|
||||
if err == nil {
|
||||
a.memoryMonitor = memMon
|
||||
}
|
||||
|
||||
volumeMonitors, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
volMons, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to fetch volume resource monitors: %w", err)
|
||||
return xerrors.Errorf("fetch volume resource monitors: %w", err)
|
||||
}
|
||||
// 0 length is valid, indicating none configured, since the volume monitors in the DB can be many.
|
||||
a.volumeMonitors = volMons
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(_ context.Context, _ *proto.GetResourcesMonitoringConfigurationRequest) (*proto.GetResourcesMonitoringConfigurationResponse, error) {
|
||||
return &proto.GetResourcesMonitoringConfigurationResponse{
|
||||
Config: &proto.GetResourcesMonitoringConfigurationResponse_Config{
|
||||
CollectionIntervalSeconds: int32(a.Config.CollectionInterval.Seconds()),
|
||||
NumDatapoints: a.Config.NumDatapoints,
|
||||
},
|
||||
Memory: func() *proto.GetResourcesMonitoringConfigurationResponse_Memory {
|
||||
if memoryErr != nil {
|
||||
if a.memoryMonitor.CreatedAt.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &proto.GetResourcesMonitoringConfigurationResponse_Memory{
|
||||
Enabled: memoryMonitor.Enabled,
|
||||
Enabled: a.memoryMonitor.Enabled,
|
||||
}
|
||||
}(),
|
||||
Volumes: func() []*proto.GetResourcesMonitoringConfigurationResponse_Volume {
|
||||
volumes := make([]*proto.GetResourcesMonitoringConfigurationResponse_Volume, 0, len(volumeMonitors))
|
||||
for _, monitor := range volumeMonitors {
|
||||
volumes := make([]*proto.GetResourcesMonitoringConfigurationResponse_Volume, 0, len(a.volumeMonitors))
|
||||
for _, monitor := range a.volumeMonitors {
|
||||
volumes = append(volumes, &proto.GetResourcesMonitoringConfigurationResponse_Volume{
|
||||
Enabled: monitor.Enabled,
|
||||
Path: monitor.Path,
|
||||
})
|
||||
}
|
||||
|
||||
return volumes
|
||||
}(),
|
||||
}, nil
|
||||
@@ -77,6 +96,10 @@ func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context
|
||||
func (a *ResourcesMonitoringAPI) PushResourcesMonitoringUsage(ctx context.Context, req *proto.PushResourcesMonitoringUsageRequest) (*proto.PushResourcesMonitoringUsageResponse, error) {
|
||||
var err error
|
||||
|
||||
// Lock for the entire push operation since calls are sequential from the agent
|
||||
a.monitorsLock.Lock()
|
||||
defer a.monitorsLock.Unlock()
|
||||
|
||||
if memoryErr := a.monitorMemory(ctx, req.Datapoints); memoryErr != nil {
|
||||
err = errors.Join(err, xerrors.Errorf("monitor memory: %w", memoryErr))
|
||||
}
|
||||
@@ -89,18 +112,7 @@ func (a *ResourcesMonitoringAPI) PushResourcesMonitoringUsage(ctx context.Contex
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
|
||||
monitor, err := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
// It is valid for an agent to not have a memory monitor, so we
|
||||
// do not want to treat it as an error.
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return xerrors.Errorf("fetch memory resource monitor: %w", err)
|
||||
}
|
||||
|
||||
if !monitor.Enabled {
|
||||
if !a.memoryMonitor.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -109,15 +121,15 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
usageDatapoints = append(usageDatapoints, datapoint.Memory)
|
||||
}
|
||||
|
||||
usageStates := resourcesmonitor.CalculateMemoryUsageStates(monitor, usageDatapoints)
|
||||
usageStates := resourcesmonitor.CalculateMemoryUsageStates(a.memoryMonitor, usageDatapoints)
|
||||
|
||||
oldState := monitor.State
|
||||
oldState := a.memoryMonitor.State
|
||||
newState := resourcesmonitor.NextState(a.Config, oldState, usageStates)
|
||||
|
||||
debouncedUntil, shouldNotify := monitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)
|
||||
debouncedUntil, shouldNotify := a.memoryMonitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)
|
||||
|
||||
//nolint:gocritic // We need to be able to update the resource monitor here.
|
||||
err = a.Database.UpdateMemoryResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateMemoryResourceMonitorParams{
|
||||
err := a.Database.UpdateMemoryResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateMemoryResourceMonitorParams{
|
||||
AgentID: a.AgentID,
|
||||
State: newState,
|
||||
UpdatedAt: dbtime.Time(a.Clock.Now()),
|
||||
@@ -127,6 +139,11 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
return xerrors.Errorf("update workspace monitor: %w", err)
|
||||
}
|
||||
|
||||
// Update cached state
|
||||
a.memoryMonitor.State = newState
|
||||
a.memoryMonitor.DebouncedUntil = dbtime.Time(debouncedUntil)
|
||||
a.memoryMonitor.UpdatedAt = dbtime.Time(a.Clock.Now())
|
||||
|
||||
if !shouldNotify {
|
||||
return nil
|
||||
}
|
||||
@@ -143,7 +160,7 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
notifications.TemplateWorkspaceOutOfMemory,
|
||||
map[string]string{
|
||||
"workspace": workspace.Name,
|
||||
"threshold": fmt.Sprintf("%d%%", monitor.Threshold),
|
||||
"threshold": fmt.Sprintf("%d%%", a.memoryMonitor.Threshold),
|
||||
},
|
||||
map[string]any{
|
||||
// NOTE(DanielleMaywood):
|
||||
@@ -169,14 +186,9 @@ func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints [
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) monitorVolumes(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
|
||||
volumeMonitors, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get or insert volume monitor: %w", err)
|
||||
}
|
||||
|
||||
outOfDiskVolumes := make([]map[string]any, 0)
|
||||
|
||||
for _, monitor := range volumeMonitors {
|
||||
for i, monitor := range a.volumeMonitors {
|
||||
if !monitor.Enabled {
|
||||
continue
|
||||
}
|
||||
@@ -219,6 +231,11 @@ func (a *ResourcesMonitoringAPI) monitorVolumes(ctx context.Context, datapoints
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("update workspace monitor: %w", err)
|
||||
}
|
||||
|
||||
// Update cached state
|
||||
a.volumeMonitors[i].State = newState
|
||||
a.volumeMonitors[i].DebouncedUntil = dbtime.Time(debouncedUntil)
|
||||
a.volumeMonitors[i].UpdatedAt = dbtime.Time(a.Clock.Now())
|
||||
}
|
||||
|
||||
if len(outOfDiskVolumes) == 0 {
|
||||
|
||||
@@ -101,6 +101,9 @@ func TestMemoryResourceMonitorDebounce(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: The monitor is given a state that will trigger NOK
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -304,6 +307,9 @@ func TestMemoryResourceMonitor(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
clock.Set(collectedAt)
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: datapoints,
|
||||
@@ -337,6 +343,8 @@ func TestMemoryResourceMonitorMissingData(t *testing.T) {
|
||||
State: database.WorkspaceAgentMonitorStateOK,
|
||||
Threshold: 80,
|
||||
})
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two NOK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
@@ -387,6 +395,9 @@ func TestMemoryResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two OK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -466,6 +477,9 @@ func TestVolumeResourceMonitorDebounce(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When:
|
||||
// - First monitor is in a NOK state
|
||||
// - Second monitor is in an OK state
|
||||
@@ -742,6 +756,9 @@ func TestVolumeResourceMonitor(t *testing.T) {
|
||||
Threshold: tt.thresholdPercent,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
clock.Set(collectedAt)
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: datapoints,
|
||||
@@ -780,6 +797,9 @@ func TestVolumeResourceMonitorMultiple(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: both of them move to a NOK state
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -832,6 +852,9 @@ func TestVolumeResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two NOK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
@@ -891,6 +914,9 @@ func TestVolumeResourceMonitorMissingData(t *testing.T) {
|
||||
Threshold: 80,
|
||||
})
|
||||
|
||||
// Initialize API to fetch and cache the monitors
|
||||
require.NoError(t, api.InitMonitors(context.Background()))
|
||||
|
||||
// When: A datapoint is missing, surrounded by two OK datapoints.
|
||||
_, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
|
||||
Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
|
||||
|
||||
+25
-57
@@ -7,7 +7,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -24,62 +23,12 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/searchquery"
|
||||
"github.com/coder/coder/v2/coderd/taskname"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
|
||||
aiagentapi "github.com/coder/agentapi-sdk-go"
|
||||
)
|
||||
|
||||
// This endpoint is experimental and not guaranteed to be stable, so we're not
|
||||
// generating public-facing documentation for it.
|
||||
func (api *API) aiTasksPrompts(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
buildIDsParam := r.URL.Query().Get("build_ids")
|
||||
if buildIDsParam == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "build_ids query parameter is required",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Parse build IDs
|
||||
buildIDStrings := strings.Split(buildIDsParam, ",")
|
||||
buildIDs := make([]uuid.UUID, 0, len(buildIDStrings))
|
||||
for _, idStr := range buildIDStrings {
|
||||
id, err := uuid.Parse(strings.TrimSpace(idStr))
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: fmt.Sprintf("Invalid build ID format: %s", idStr),
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
buildIDs = append(buildIDs, id)
|
||||
}
|
||||
|
||||
parameters, err := api.Database.GetWorkspaceBuildParametersByBuildIDs(ctx, buildIDs)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching workspace build parameters.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
promptsByBuildID := make(map[string]string, len(parameters))
|
||||
for _, param := range parameters {
|
||||
if param.Name != codersdk.AITaskPromptParameterName {
|
||||
continue
|
||||
}
|
||||
buildID := param.WorkspaceBuildID.String()
|
||||
promptsByBuildID[buildID] = param.Value
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.AITasksPromptsResponse{
|
||||
Prompts: promptsByBuildID,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Create a new AI task
|
||||
// @Description: EXPERIMENTAL: this endpoint is experimental and not guaranteed to be stable.
|
||||
// @ID create-task
|
||||
@@ -174,13 +123,31 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the template defines the AI Prompt parameter.
|
||||
templateParams, err := api.Database.GetTemplateVersionParameters(ctx, req.TemplateVersionID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching template parameters.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
var richParams []codersdk.WorkspaceBuildParameter
|
||||
if _, hasAIPromptParam := slice.Find(templateParams, func(param database.TemplateVersionParameter) bool {
|
||||
return param.Name == codersdk.AITaskPromptParameterName
|
||||
}); hasAIPromptParam {
|
||||
// Only add the AI Prompt parameter if the template defines it.
|
||||
richParams = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: req.Input},
|
||||
}
|
||||
}
|
||||
|
||||
createReq := codersdk.CreateWorkspaceRequest{
|
||||
Name: taskName,
|
||||
TemplateVersionID: req.TemplateVersionID,
|
||||
TemplateVersionPresetID: req.TemplateVersionPresetID,
|
||||
RichParameterValues: []codersdk.WorkspaceBuildParameter{
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: req.Input},
|
||||
},
|
||||
RichParameterValues: richParams,
|
||||
}
|
||||
|
||||
var owner workspaceOwner
|
||||
@@ -241,6 +208,7 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
|
||||
// Create task record in the database before creating the workspace so that
|
||||
// we can request that the workspace be linked to it after creation.
|
||||
dbTaskTable, err = tx.InsertTask(ctx, database.InsertTaskParams{
|
||||
ID: uuid.New(),
|
||||
OrganizationID: templateVersion.OrganizationID,
|
||||
OwnerID: owner.ID,
|
||||
Name: taskName,
|
||||
@@ -338,8 +306,8 @@ func taskFromDBTaskAndWorkspace(dbTask database.Task, ws codersdk.Workspace) cod
|
||||
ID: dbTask.ID,
|
||||
OrganizationID: dbTask.OrganizationID,
|
||||
OwnerID: dbTask.OwnerID,
|
||||
OwnerName: ws.OwnerName,
|
||||
OwnerAvatarURL: ws.OwnerAvatarURL,
|
||||
OwnerName: dbTask.OwnerUsername,
|
||||
OwnerAvatarURL: dbTask.OwnerAvatarUrl,
|
||||
Name: dbTask.Name,
|
||||
TemplateID: ws.TemplateID,
|
||||
TemplateVersionID: dbTask.TemplateVersionID,
|
||||
|
||||
+154
-172
@@ -1,15 +1,13 @@
|
||||
package coderd_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -34,128 +32,6 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestAITasksPrompts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("EmptyBuildIDs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
experimentalClient := codersdk.NewExperimentalClient(client)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Test with empty build IDs
|
||||
prompts, err := experimentalClient.AITaskPrompts(ctx, []uuid.UUID{})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, prompts.Prompts)
|
||||
})
|
||||
|
||||
t.Run("MultipleBuilds", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
adminClient := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
first := coderdtest.CreateFirstUser(t, adminClient)
|
||||
memberClient, _ := coderdtest.CreateAnotherUser(t, adminClient, first.OrganizationID)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a template with parameters
|
||||
version := coderdtest.CreateTemplateVersion(t, adminClient, first.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: []*proto.Response{{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{
|
||||
{
|
||||
Name: "param1",
|
||||
Type: "string",
|
||||
DefaultValue: "default1",
|
||||
},
|
||||
{
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Type: "string",
|
||||
DefaultValue: "default2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, adminClient, first.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, adminClient, version.ID)
|
||||
|
||||
// Create two workspaces with different parameters
|
||||
workspace1 := coderdtest.CreateWorkspace(t, memberClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1a"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2a"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, memberClient, workspace1.LatestBuild.ID)
|
||||
|
||||
workspace2 := coderdtest.CreateWorkspace(t, memberClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1b"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2b"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, memberClient, workspace2.LatestBuild.ID)
|
||||
|
||||
workspace3 := coderdtest.CreateWorkspace(t, adminClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1c"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2c"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, adminClient, workspace3.LatestBuild.ID)
|
||||
allBuildIDs := []uuid.UUID{workspace1.LatestBuild.ID, workspace2.LatestBuild.ID, workspace3.LatestBuild.ID}
|
||||
|
||||
experimentalMemberClient := codersdk.NewExperimentalClient(memberClient)
|
||||
// Test parameters endpoint as member
|
||||
prompts, err := experimentalMemberClient.AITaskPrompts(ctx, allBuildIDs)
|
||||
require.NoError(t, err)
|
||||
// we expect 2 prompts because the member client does not have access to workspace3
|
||||
// since it was created by the admin client
|
||||
require.Len(t, prompts.Prompts, 2)
|
||||
|
||||
// Check workspace1 parameters
|
||||
build1Prompt := prompts.Prompts[workspace1.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2a", build1Prompt)
|
||||
|
||||
// Check workspace2 parameters
|
||||
build2Prompt := prompts.Prompts[workspace2.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2b", build2Prompt)
|
||||
|
||||
experimentalAdminClient := codersdk.NewExperimentalClient(adminClient)
|
||||
// Test parameters endpoint as admin
|
||||
// we expect 3 prompts because the admin client has access to all workspaces
|
||||
prompts, err = experimentalAdminClient.AITaskPrompts(ctx, allBuildIDs)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, prompts.Prompts, 3)
|
||||
|
||||
// Check workspace3 parameters
|
||||
build3Prompt := prompts.Prompts[workspace3.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2c", build3Prompt)
|
||||
})
|
||||
|
||||
t.Run("NonExistentBuildIDs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Test with non-existent build IDs
|
||||
nonExistentID := uuid.New()
|
||||
experimentalClient := codersdk.NewExperimentalClient(client)
|
||||
prompts, err := experimentalClient.AITaskPrompts(ctx, []uuid.UUID{nonExistentID})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, prompts.Prompts)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -187,7 +63,6 @@ func TestTasks(t *testing.T) {
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
},
|
||||
},
|
||||
@@ -258,6 +133,9 @@ func TestTasks(t *testing.T) {
|
||||
// Wait for the workspace to be built.
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
if assert.True(t, workspace.TaskID.Valid, "task id should be set on workspace") {
|
||||
assert.Equal(t, task.ID, workspace.TaskID.UUID, "workspace task id should match")
|
||||
}
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
// List tasks via experimental API and verify the prompt and status mapping.
|
||||
@@ -296,6 +174,9 @@ func TestTasks(t *testing.T) {
|
||||
// Get the workspace and wait for it to be ready.
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
|
||||
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
|
||||
}
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
ws = coderdtest.MustWorkspace(t, client, task.WorkspaceID.UUID)
|
||||
// Assert invariant: the workspace has exactly one resource with one agent with one app.
|
||||
@@ -370,24 +251,23 @@ func TestTasks(t *testing.T) {
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
|
||||
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
|
||||
}
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
err = exp.DeleteTask(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "delete task request should be accepted")
|
||||
|
||||
// Poll until the workspace is deleted.
|
||||
for {
|
||||
testutil.Eventually(ctx, t, func(ctx context.Context) (done bool) {
|
||||
dws, derr := client.DeletedWorkspace(ctx, task.WorkspaceID.UUID)
|
||||
if derr == nil && dws.LatestBuild.Status == codersdk.WorkspaceStatusDeleted {
|
||||
break
|
||||
if !assert.NoError(t, derr, "expected to fetch deleted workspace before deadline") {
|
||||
return false
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
require.NoError(t, derr, "expected to fetch deleted workspace before deadline")
|
||||
require.Equal(t, codersdk.WorkspaceStatusDeleted, dws.LatestBuild.Status, "workspace should be deleted before deadline")
|
||||
break
|
||||
}
|
||||
time.Sleep(testutil.IntervalMedium)
|
||||
}
|
||||
t.Logf("workspace latest_build status: %q", dws.LatestBuild.Status)
|
||||
return dws.LatestBuild.Status == codersdk.WorkspaceStatusDeleted
|
||||
}, testutil.IntervalMedium, "workspace should be deleted before deadline")
|
||||
})
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
@@ -420,6 +300,9 @@ func TestTasks(t *testing.T) {
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
ws := coderdtest.CreateWorkspace(t, client, template.ID)
|
||||
if assert.False(t, ws.TaskID.Valid, "task id should not be set on non-task workspace") {
|
||||
assert.Zero(t, ws.TaskID, "non-task workspace task id should be empty")
|
||||
}
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
@@ -468,6 +351,72 @@ func TestTasks(t *testing.T) {
|
||||
t.Fatalf("unexpected status code: %d (expected 403 or 404)", authErr.StatusCode())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("DeletedWorkspace", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Mark the workspace as deleted directly in the database, bypassing provisionerd.
|
||||
require.NoError(t, db.UpdateWorkspaceDeletedByID(dbauthz.AsProvisionerd(ctx), database.UpdateWorkspaceDeletedByIDParams{
|
||||
ID: ws.ID,
|
||||
Deleted: true,
|
||||
}))
|
||||
// We should still be able to fetch the task if its workspace was deleted.
|
||||
// Provisionerdserver will attempt delete the related task when deleting a workspace.
|
||||
// This test ensures that we can still handle the case where, for some reason, the
|
||||
// task has not been marked as deleted, but the workspace has.
|
||||
task, err = exp.TaskByID(ctx, task.ID)
|
||||
require.NoError(t, err, "fetching a task should still work if its related workspace is deleted")
|
||||
err = exp.DeleteTask(ctx, task.OwnerID.String(), task.ID)
|
||||
require.NoError(t, err, "should be possible to delete a task with no workspace")
|
||||
})
|
||||
|
||||
t.Run("DeletingTaskWorkspaceDeletesTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
|
||||
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
|
||||
}
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// When; the task workspace is deleted
|
||||
coderdtest.MustTransitionWorkspace(t, client, ws.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionDelete)
|
||||
// Then: the task associated with the workspace is also deleted
|
||||
_, err = exp.TaskByID(ctx, task.ID)
|
||||
require.Error(t, err, "expected an error fetching the task")
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr, "expected a codersdk.Error")
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Send", func(t *testing.T) {
|
||||
@@ -782,6 +731,51 @@ func TestTasksCreate(t *testing.T) {
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
expClient := codersdk.NewExperimentalClient(client)
|
||||
|
||||
task, err := expClient.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: taskPrompt,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid)
|
||||
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
assert.NotEmpty(t, task.Name)
|
||||
assert.Equal(t, template.ID, task.TemplateID)
|
||||
|
||||
parameters, err := client.WorkspaceBuildParameters(ctx, ws.LatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, parameters, 0)
|
||||
})
|
||||
|
||||
t.Run("OK AIPromptBackCompat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
taskPrompt = "Some task prompt"
|
||||
)
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Given: A template with an "AI Prompt" parameter
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
@@ -861,7 +855,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -977,7 +970,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1037,7 +1029,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1074,7 +1065,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1127,7 +1117,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1140,7 +1129,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1334,31 +1322,31 @@ func TestTasksNotification(t *testing.T) {
|
||||
// Given: a workspace build with an agent containing an App
|
||||
workspaceAgentAppID := uuid.New()
|
||||
workspaceBuildID := uuid.New()
|
||||
workspaceBuildSeed := database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
}
|
||||
if tc.isAITask {
|
||||
workspaceBuildSeed = database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
// AI Task configuration
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
AITaskSidebarAppID: uuid.NullUUID{UUID: workspaceAgentAppID, Valid: true},
|
||||
}
|
||||
}
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
workspaceBuilder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: ownerUser.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).Seed(workspaceBuildSeed).Params(database.WorkspaceBuildParameter{
|
||||
WorkspaceBuildID: workspaceBuildID,
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: tc.taskPrompt,
|
||||
}).WithAgent(func(agent []*proto.Agent) []*proto.Agent {
|
||||
agent[0].Apps = []*proto.App{{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
}}
|
||||
return agent
|
||||
}).Do()
|
||||
}).Seed(database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
})
|
||||
if tc.isAITask {
|
||||
workspaceBuilder = workspaceBuilder.
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: tc.taskPrompt,
|
||||
}, &proto.App{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
})
|
||||
} else {
|
||||
workspaceBuilder = workspaceBuilder.
|
||||
WithAgent(func(agent []*proto.Agent) []*proto.Agent {
|
||||
agent[0].Apps = []*proto.App{{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
}}
|
||||
return agent
|
||||
})
|
||||
}
|
||||
workspaceBuild := workspaceBuilder.Do()
|
||||
|
||||
// Given: the workspace agent app has previous statuses
|
||||
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(workspaceBuild.AgentToken))
|
||||
@@ -1399,13 +1387,7 @@ func TestTasksNotification(t *testing.T) {
|
||||
require.Len(t, sent, 1)
|
||||
require.Equal(t, memberUser.ID, sent[0].UserID)
|
||||
require.Len(t, sent[0].Labels, 2)
|
||||
// NOTE: len(string) is the number of bytes in the string, not the number of runes.
|
||||
require.LessOrEqual(t, utf8.RuneCountInString(sent[0].Labels["task"]), 160)
|
||||
if len(tc.taskPrompt) > 160 {
|
||||
require.Contains(t, tc.taskPrompt, strings.TrimSuffix(sent[0].Labels["task"], "…"))
|
||||
} else {
|
||||
require.Equal(t, tc.taskPrompt, sent[0].Labels["task"])
|
||||
}
|
||||
require.Equal(t, workspaceBuild.Task.Name, sent[0].Labels["task"])
|
||||
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
|
||||
} else {
|
||||
// Then: No notification is sent
|
||||
|
||||
Generated
+63
-11
@@ -85,7 +85,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/aibridge/interceptions": {
|
||||
"/aibridge/interceptions": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -11668,12 +11668,35 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeBedrockConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"access_key": {
|
||||
"type": "string"
|
||||
},
|
||||
"access_key_secret": {
|
||||
"type": "string"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"type": "string"
|
||||
},
|
||||
"small_fast_model": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"anthropic": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeAnthropicConfig"
|
||||
},
|
||||
"bedrock": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeBedrockConfig"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -11685,6 +11708,10 @@ const docTemplate = `{
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -12496,6 +12523,13 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -13719,6 +13753,13 @@ const docTemplate = `{
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
@@ -14275,11 +14316,9 @@ const docTemplate = `{
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing",
|
||||
"aibridge"
|
||||
"workspace-sharing"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAIBridge": "Enables AI Bridge functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
@@ -14297,8 +14336,7 @@ const docTemplate = `{
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing",
|
||||
"ExperimentAIBridge"
|
||||
"ExperimentWorkspaceSharing"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -15397,6 +15435,9 @@ const docTemplate = `{
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -17486,6 +17527,13 @@ const docTemplate = `{
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -19667,6 +19715,14 @@ const docTemplate = `{
|
||||
"description": "OwnerName is the username of the owner of the workspace.",
|
||||
"type": "string"
|
||||
},
|
||||
"task_id": {
|
||||
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"template_active_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -20474,7 +20530,7 @@ const docTemplate = `{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with ` + "`" + `TaskAppID` + "`" + `",
|
||||
"description": "Deprecated: This field has been replaced with ` + "`" + `Task.WorkspaceAppID` + "`" + `",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
@@ -20556,10 +20612,6 @@ const docTemplate = `{
|
||||
}
|
||||
]
|
||||
},
|
||||
"task_app_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"template_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
|
||||
Generated
+63
-11
@@ -65,7 +65,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/experimental/aibridge/interceptions": {
|
||||
"/aibridge/interceptions": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
@@ -10364,12 +10364,35 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeBedrockConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"access_key": {
|
||||
"type": "string"
|
||||
},
|
||||
"access_key_secret": {
|
||||
"type": "string"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"type": "string"
|
||||
},
|
||||
"small_fast_model": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIBridgeConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"anthropic": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeAnthropicConfig"
|
||||
},
|
||||
"bedrock": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeBedrockConfig"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -10381,6 +10404,10 @@
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -11178,6 +11205,13 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -12333,6 +12367,13 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
@@ -12882,11 +12923,9 @@
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing",
|
||||
"aibridge"
|
||||
"workspace-sharing"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAIBridge": "Enables AI Bridge functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
@@ -12904,8 +12943,7 @@
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing",
|
||||
"ExperimentAIBridge"
|
||||
"ExperimentWorkspaceSharing"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -13951,6 +13989,9 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -15978,6 +16019,13 @@
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_member_permissions": {
|
||||
"description": "OrganizationMemberPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific for the organization in the field 'OrganizationID' above.",
|
||||
"type": "array",
|
||||
@@ -18053,6 +18101,14 @@
|
||||
"description": "OwnerName is the username of the owner of the workspace.",
|
||||
"type": "string"
|
||||
},
|
||||
"task_id": {
|
||||
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"template_active_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
@@ -18808,7 +18864,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with `TaskAppID`",
|
||||
"description": "Deprecated: This field has been replaced with `Task.WorkspaceAppID`",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
@@ -18886,10 +18942,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"task_app_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"template_version_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
|
||||
+2
-2
@@ -509,11 +509,11 @@ func (api *API) auditLogResourceLink(ctx context.Context, alog database.GetAudit
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
|
||||
user, err := api.Database.GetUserByID(ctx, task.OwnerID)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("/tasks/%s/%s", workspace.OwnerName, task.Name)
|
||||
return fmt.Sprintf("/tasks/%s/%s", user.Username, task.ID)
|
||||
|
||||
default:
|
||||
return ""
|
||||
|
||||
+11
-10
@@ -50,6 +50,13 @@ func TestCheckPermissions(t *testing.T) {
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readOrgWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OrganizationID: adminUser.OrganizationID.String(),
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readMyself: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceUser,
|
||||
@@ -58,16 +65,10 @@ func TestCheckPermissions(t *testing.T) {
|
||||
Action: "read",
|
||||
},
|
||||
readOwnWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OwnerID: "me",
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
readOrgWorkspaces: {
|
||||
Object: codersdk.AuthorizationObject{
|
||||
ResourceType: codersdk.ResourceWorkspace,
|
||||
OrganizationID: adminUser.OrganizationID.String(),
|
||||
OwnerID: "me",
|
||||
},
|
||||
Action: "read",
|
||||
},
|
||||
@@ -92,9 +93,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: adminUser.UserID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: true,
|
||||
readOrgWorkspaces: true,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: true,
|
||||
updateSpecificTemplate: true,
|
||||
},
|
||||
},
|
||||
@@ -104,9 +105,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: orgAdminUser.ID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: true,
|
||||
readOrgWorkspaces: true,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: true,
|
||||
updateSpecificTemplate: true,
|
||||
},
|
||||
},
|
||||
@@ -116,9 +117,9 @@ func TestCheckPermissions(t *testing.T) {
|
||||
UserID: memberUser.ID,
|
||||
Check: map[string]bool{
|
||||
readAllUsers: false,
|
||||
readOrgWorkspaces: false,
|
||||
readMyself: true,
|
||||
readOwnWorkspaces: true,
|
||||
readOrgWorkspaces: false,
|
||||
updateSpecificTemplate: false,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1764,3 +1764,175 @@ func TestExecutorAutostartSkipsWhenNoProvisionersAvailable(t *testing.T) {
|
||||
|
||||
assert.Len(t, stats.Transitions, 1, "should create builds when provisioners are available")
|
||||
}
|
||||
|
||||
func TestExecutorTaskWorkspace(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
createTaskTemplate := func(t *testing.T, client *codersdk.Client, orgID uuid.UUID, ctx context.Context, defaultTTL time.Duration) codersdk.Template {
|
||||
t.Helper()
|
||||
|
||||
taskAppID := uuid.New()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, orgID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{HasAiTasks: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
ProvisionApply: []*proto.Response{
|
||||
{
|
||||
Type: &proto.Response_Apply{
|
||||
Apply: &proto.ApplyComplete{
|
||||
Resources: []*proto.Resource{
|
||||
{
|
||||
Agents: []*proto.Agent{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
Name: "dev",
|
||||
Auth: &proto.Agent_Token{
|
||||
Token: uuid.NewString(),
|
||||
},
|
||||
Apps: []*proto.App{
|
||||
{
|
||||
Id: taskAppID.String(),
|
||||
Slug: "task-app",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
AiTasks: []*proto.AITask{
|
||||
{
|
||||
AppId: taskAppID.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, orgID, version.ID)
|
||||
|
||||
if defaultTTL > 0 {
|
||||
_, err := client.UpdateTemplateMeta(ctx, template.ID, codersdk.UpdateTemplateMeta{
|
||||
DefaultTTLMillis: defaultTTL.Milliseconds(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return template
|
||||
}
|
||||
|
||||
createTaskWorkspace := func(t *testing.T, client *codersdk.Client, template codersdk.Template, ctx context.Context, input string) codersdk.Workspace {
|
||||
t.Helper()
|
||||
|
||||
exp := codersdk.NewExperimentalClient(client)
|
||||
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: input,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace")
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
return workspace
|
||||
}
|
||||
|
||||
t.Run("Autostart", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
sched = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
)
|
||||
|
||||
// Given: A task workspace
|
||||
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 0)
|
||||
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostart")
|
||||
|
||||
// Given: The task workspace has an autostart schedule
|
||||
err := client.UpdateWorkspaceAutostart(ctx, workspace.ID, codersdk.UpdateWorkspaceAutostartRequest{
|
||||
Schedule: ptr.Ref(sched.String()),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Given: That the workspace is in a stopped state.
|
||||
workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
|
||||
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: the autobuild executor ticks after the scheduled time
|
||||
go func() {
|
||||
tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: We expect to see a start transition
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStart, stats.Transitions[workspace.ID], "should autostart the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
})
|
||||
|
||||
t.Run("Autostop", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
)
|
||||
|
||||
// Given: A task workspace with an 8 hour deadline
|
||||
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 8*time.Hour)
|
||||
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostop")
|
||||
|
||||
// Given: The workspace is currently running
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
|
||||
require.NotZero(t, workspace.LatestBuild.Deadline, "workspace should have a deadline for autostop")
|
||||
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: the autobuild executor ticks after the deadline
|
||||
go func() {
|
||||
tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: We expect to see a stop transition
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
})
|
||||
}
|
||||
|
||||
+1
-4
@@ -1021,10 +1021,7 @@ func New(options *Options) *API {
|
||||
apiRateLimiter,
|
||||
httpmw.ReportCLITelemetry(api.Logger, options.Telemetry),
|
||||
)
|
||||
r.Route("/aitasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Get("/prompts", api.aiTasksPrompts)
|
||||
})
|
||||
|
||||
r.Route("/tasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
|
||||
|
||||
@@ -1604,7 +1604,7 @@ func (nopcloser) Close() error { return nil }
|
||||
// SDKError coerces err into an SDK error.
|
||||
func SDKError(t testing.TB, err error) *codersdk.Error {
|
||||
var cerr *codersdk.Error
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %w", err)
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %s", err)
|
||||
return cerr
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ const (
|
||||
CheckSubsystemsNotNone CheckConstraint = "subsystems_not_none" // workspace_agents
|
||||
CheckWorkspaceBuildsAiTaskSidebarAppIDRequired CheckConstraint = "workspace_builds_ai_task_sidebar_app_id_required" // workspace_builds
|
||||
CheckWorkspaceBuildsDeadlineBelowMaxDeadline CheckConstraint = "workspace_builds_deadline_below_max_deadline" // workspace_builds
|
||||
CheckTelemetryLockEventTypeConstraint CheckConstraint = "telemetry_lock_event_type_constraint" // telemetry_locks
|
||||
CheckValidationMonotonicOrder CheckConstraint = "validation_monotonic_order" // template_version_parameters
|
||||
CheckUsageEventTypeCheck CheckConstraint = "usage_event_type_check" // usage_events
|
||||
)
|
||||
|
||||
@@ -714,12 +714,13 @@ func RBACRole(role rbac.Role) codersdk.Role {
|
||||
|
||||
orgPerms := role.ByOrgID[slim.OrganizationID]
|
||||
return codersdk.Role{
|
||||
Name: slim.Name,
|
||||
OrganizationID: slim.OrganizationID,
|
||||
DisplayName: slim.DisplayName,
|
||||
SitePermissions: List(role.Site, RBACPermission),
|
||||
OrganizationPermissions: List(orgPerms.Org, RBACPermission),
|
||||
UserPermissions: List(role.User, RBACPermission),
|
||||
Name: slim.Name,
|
||||
OrganizationID: slim.OrganizationID,
|
||||
DisplayName: slim.DisplayName,
|
||||
SitePermissions: List(role.Site, RBACPermission),
|
||||
UserPermissions: List(role.User, RBACPermission),
|
||||
OrganizationPermissions: List(orgPerms.Org, RBACPermission),
|
||||
OrganizationMemberPermissions: List(orgPerms.Member, RBACPermission),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -734,8 +735,8 @@ func Role(role database.CustomRole) codersdk.Role {
|
||||
OrganizationID: orgID,
|
||||
DisplayName: role.DisplayName,
|
||||
SitePermissions: List(role.SitePermissions, Permission),
|
||||
OrganizationPermissions: List(role.OrgPermissions, Permission),
|
||||
UserPermissions: List(role.UserPermissions, Permission),
|
||||
OrganizationPermissions: List(role.OrgPermissions, Permission),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -962,7 +963,7 @@ func AIBridgeInterception(interception database.AIBridgeInterception, initiator
|
||||
// created_at ASC
|
||||
return sdkToolUsages[i].CreatedAt.Before(sdkToolUsages[j].CreatedAt)
|
||||
})
|
||||
return codersdk.AIBridgeInterception{
|
||||
intc := codersdk.AIBridgeInterception{
|
||||
ID: interception.ID,
|
||||
Initiator: MinimalUserFromVisibleUser(initiator),
|
||||
Provider: interception.Provider,
|
||||
@@ -973,6 +974,10 @@ func AIBridgeInterception(interception database.AIBridgeInterception, initiator
|
||||
UserPrompts: sdkUserPrompts,
|
||||
ToolUsages: sdkToolUsages,
|
||||
}
|
||||
if interception.EndedAt.Valid {
|
||||
intc.EndedAt = &interception.EndedAt.Time
|
||||
}
|
||||
return intc
|
||||
}
|
||||
|
||||
func AIBridgeTokenUsage(usage database.AIBridgeTokenUsage) codersdk.AIBridgeTokenUsage {
|
||||
|
||||
@@ -219,8 +219,8 @@ var (
|
||||
rbac.ResourceUser.Type: {policy.ActionRead, policy.ActionReadPersonal, policy.ActionUpdatePersonal},
|
||||
rbac.ResourceWorkspaceDormant.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStop},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionCreateAgent},
|
||||
// Provisionerd needs to read and update tasks associated with workspaces.
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
// Provisionerd needs to read, update, and delete tasks associated with workspaces.
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceApiKey.Type: {policy.WildcardSymbol},
|
||||
// When org scoped provisioner credentials are implemented,
|
||||
// this can be reduced to read a specific org.
|
||||
@@ -254,6 +254,7 @@ var (
|
||||
rbac.ResourceFile.Type: {policy.ActionRead}, // Required to read terraform files
|
||||
rbac.ResourceNotificationMessage.Type: {policy.ActionCreate, policy.ActionRead},
|
||||
rbac.ResourceSystem.Type: {policy.WildcardSymbol},
|
||||
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
rbac.ResourceTemplate.Type: {policy.ActionRead, policy.ActionUpdate},
|
||||
rbac.ResourceUser.Type: {policy.ActionRead},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop},
|
||||
@@ -395,11 +396,13 @@ var (
|
||||
Identifier: rbac.RoleIdentifier{Name: "subagentapi"},
|
||||
DisplayName: "Sub Agent API",
|
||||
Site: []rbac.Permission{},
|
||||
User: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
}),
|
||||
User: []rbac.Permission{},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {},
|
||||
orgID.String(): {
|
||||
Member: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -1290,14 +1293,17 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
return xerrors.Errorf("invalid role: %w", err)
|
||||
}
|
||||
|
||||
if len(rbacRole.ByOrgID) > 0 && len(rbacRole.Site) > 0 {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org scoped perms, then knowing who can
|
||||
// do what gets more complicated.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign both org and site permissions at the same time")
|
||||
if len(rbacRole.ByOrgID) > 0 && (len(rbacRole.Site) > 0 || len(rbacRole.User) > 0) {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org
|
||||
// scoped perms, then knowing who can do what gets more complicated. Roles
|
||||
// should either be entirely org-scoped or entirely unrelated to
|
||||
// organizations.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign both org-scoped and site/user permissions at the same time")
|
||||
}
|
||||
|
||||
if len(rbacRole.ByOrgID) > 1 {
|
||||
// Again to avoid more complexity in our roles
|
||||
// Again to avoid more complexity in our roles. Roles are limited to one
|
||||
// organization.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign permissions to more than 1 org at a time")
|
||||
}
|
||||
|
||||
@@ -1313,7 +1319,18 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
for _, orgPerm := range perms.Org {
|
||||
err := q.customRoleEscalationCheck(ctx, act, orgPerm, rbac.Object{OrgID: orgID, Type: orgPerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("org=%q: %w", orgID, err)
|
||||
return xerrors.Errorf("org=%q: org: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
for _, memberPerm := range perms.Member {
|
||||
// The person giving the permission should still be required to have
|
||||
// the permissions throughout the org in order to give individuals the
|
||||
// same permission among their own resources, since the role can be given
|
||||
// to anyone. The `Owner` is intentionally omitted from the `Object` to
|
||||
// enforce this.
|
||||
err := q.customRoleEscalationCheck(ctx, act, memberPerm, rbac.Object{OrgID: orgID, Type: memberPerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("org=%q: member: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1331,8 +1348,8 @@ func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole)
|
||||
func (q *querier) authorizeProvisionerJob(ctx context.Context, job database.ProvisionerJob) error {
|
||||
switch job.Type {
|
||||
case database.ProvisionerJobTypeWorkspaceBuild:
|
||||
// Authorized call to get workspace build. If we can read the build, we
|
||||
// can read the job.
|
||||
// Authorized call to get workspace build. If we can read the build, we can
|
||||
// read the job.
|
||||
_, err := q.GetWorkspaceBuildByJobID(ctx, job.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch related workspace build: %w", err)
|
||||
@@ -1375,8 +1392,8 @@ func (q *querier) ActivityBumpWorkspace(ctx context.Context, arg database.Activi
|
||||
}
|
||||
|
||||
func (q *querier) AllUserIDs(ctx context.Context, includeSystem bool) ([]uuid.UUID, error) {
|
||||
// Although this technically only reads users, only system-related functions should be
|
||||
// allowed to call this.
|
||||
// Although this technically only reads users, only system-related functions
|
||||
// should be allowed to call this.
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1395,8 +1412,8 @@ func (q *querier) ArchiveUnusedTemplateVersions(ctx context.Context, arg databas
|
||||
}
|
||||
|
||||
func (q *querier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg database.BatchUpdateWorkspaceLastUsedAtParams) error {
|
||||
// Could be any workspace and checking auth to each workspace is overkill for the purpose
|
||||
// of this function.
|
||||
// Could be any workspace and checking auth to each workspace is overkill for
|
||||
// the purpose of this function.
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1424,6 +1441,13 @@ func (q *querier) BulkMarkNotificationMessagesSent(ctx context.Context, arg data
|
||||
return q.db.BulkMarkNotificationMessagesSent(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg database.CalculateAIBridgeInterceptionsTelemetrySummaryParams) (database.CalculateAIBridgeInterceptionsTelemetrySummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAibridgeInterception); err != nil {
|
||||
return database.CalculateAIBridgeInterceptionsTelemetrySummaryRow{}, err
|
||||
}
|
||||
return q.db.CalculateAIBridgeInterceptionsTelemetrySummary(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ClaimPrebuiltWorkspace(ctx context.Context, arg database.ClaimPrebuiltWorkspaceParams) (database.ClaimPrebuiltWorkspaceRow, error) {
|
||||
empty := database.ClaimPrebuiltWorkspaceRow{}
|
||||
|
||||
@@ -1723,6 +1747,13 @@ func (q *querier) DeleteOldProvisionerDaemons(ctx context.Context) error {
|
||||
return q.db.DeleteOldProvisionerDaemons(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldTelemetryLocks(ctx context.Context, beforeTime time.Time) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteOldTelemetryLocks(ctx, beforeTime)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
@@ -2618,6 +2649,13 @@ func (q *querier) GetOrganizationsByUserID(ctx context.Context, userID database.
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationsByUserID)(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceOrganization.All()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetOrganizationsWithPrebuildStatus(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
|
||||
version, err := q.db.GetTemplateVersionByJobID(ctx, jobID)
|
||||
if err != nil {
|
||||
@@ -4212,6 +4250,13 @@ func (q *querier) InsertTelemetryItemIfNotExists(ctx context.Context, arg databa
|
||||
return q.db.InsertTelemetryItemIfNotExists(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTelemetryLock(ctx context.Context, arg database.InsertTelemetryLockParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.InsertTelemetryLock(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
|
||||
obj := rbac.ResourceTemplate.InOrg(arg.OrganizationID)
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, obj); err != nil {
|
||||
@@ -4523,6 +4568,13 @@ func (q *querier) ListAIBridgeInterceptions(ctx context.Context, arg database.Li
|
||||
return q.db.ListAuthorizedAIBridgeInterceptions(ctx, arg, prep)
|
||||
}
|
||||
|
||||
func (q *querier) ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg database.ListAIBridgeInterceptionsTelemetrySummariesParams) ([]database.ListAIBridgeInterceptionsTelemetrySummariesRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAibridgeInterception); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.ListAIBridgeInterceptionsTelemetrySummaries(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ListAIBridgeTokenUsagesByInterceptionIDs(ctx context.Context, interceptionIDs []uuid.UUID) ([]database.AIBridgeTokenUsage, error) {
|
||||
// This function is a system function until we implement a join for aibridge interceptions.
|
||||
// Matches the behavior of the workspaces listing endpoint.
|
||||
@@ -4711,6 +4763,13 @@ func (q *querier) UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
|
||||
return update(q.log, q.auth, fetch, q.db.UnfavoriteWorkspace)(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateAIBridgeInterceptionEnded(ctx context.Context, params database.UpdateAIBridgeInterceptionEndedParams) (database.AIBridgeInterception, error) {
|
||||
if err := q.authorizeAIBridgeInterceptionAction(ctx, policy.ActionUpdate, params.ID); err != nil {
|
||||
return database.AIBridgeInterception{}, err
|
||||
}
|
||||
return q.db.UpdateAIBridgeInterceptionEnded(ctx, params)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKeyByIDParams) error {
|
||||
fetch := func(ctx context.Context, arg database.UpdateAPIKeyByIDParams) (database.APIKey, error) {
|
||||
return q.db.GetAPIKeyByID(ctx, arg.ID)
|
||||
@@ -4882,10 +4941,10 @@ func (q *querier) UpdateOrganizationDeletedByID(ctx context.Context, arg databas
|
||||
return deleteQ(q.log, q.auth, q.db.GetOrganizationByID, deleteF)(ctx, arg.ID)
|
||||
}
|
||||
|
||||
func (q *querier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
|
||||
func (q *querier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
|
||||
// Prebuild operation for canceling pending prebuild jobs from non-active template versions
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourcePrebuiltWorkspace); err != nil {
|
||||
return []uuid.UUID{}, err
|
||||
return []database.UpdatePrebuildProvisionerJobWithCancelRow{}, err
|
||||
}
|
||||
return q.db.UpdatePrebuildProvisionerJobWithCancel(ctx, arg)
|
||||
}
|
||||
|
||||
@@ -646,10 +646,13 @@ func (s *MethodTestSuite) TestProvisionerJob() {
|
||||
PresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
Now: dbtime.Now(),
|
||||
}
|
||||
jobIDs := []uuid.UUID{uuid.New(), uuid.New()}
|
||||
canceledJobs := []database.UpdatePrebuildProvisionerJobWithCancelRow{
|
||||
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
|
||||
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
|
||||
}
|
||||
|
||||
dbm.EXPECT().UpdatePrebuildProvisionerJobWithCancel(gomock.Any(), arg).Return(jobIDs, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourcePrebuiltWorkspace, policy.ActionUpdate).Returns(jobIDs)
|
||||
dbm.EXPECT().UpdatePrebuildProvisionerJobWithCancel(gomock.Any(), arg).Return(canceledJobs, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourcePrebuiltWorkspace, policy.ActionUpdate).Returns(canceledJobs)
|
||||
}))
|
||||
s.Run("GetProvisionerJobsByIDs", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
org := testutil.Fake(s.T(), faker, database.Organization{})
|
||||
@@ -3756,6 +3759,14 @@ func (s *MethodTestSuite) TestPrebuilds() {
|
||||
dbm.EXPECT().GetPrebuildMetrics(gomock.Any()).Return([]database.GetPrebuildMetricsRow{}, nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetOrganizationsWithPrebuildStatus", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
arg := database.GetOrganizationsWithPrebuildStatusParams{
|
||||
UserID: uuid.New(),
|
||||
GroupName: "test",
|
||||
}
|
||||
dbm.EXPECT().GetOrganizationsWithPrebuildStatus(gomock.Any(), arg).Return([]database.GetOrganizationsWithPrebuildStatusRow{}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceOrganization.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetPrebuildsSettings", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetPrebuildsSettings(gomock.Any()).Return("{}", nil).AnyTimes()
|
||||
check.Args().Asserts()
|
||||
@@ -4617,4 +4628,35 @@ func (s *MethodTestSuite) TestAIBridge() {
|
||||
db.EXPECT().ListAIBridgeToolUsagesByInterceptionIDs(gomock.Any(), ids).Return([]database.AIBridgeToolUsage{}, nil).AnyTimes()
|
||||
check.Args(ids).Asserts(rbac.ResourceSystem, policy.ActionRead).Returns([]database.AIBridgeToolUsage{})
|
||||
}))
|
||||
|
||||
s.Run("UpdateAIBridgeInterceptionEnded", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
intcID := uuid.UUID{1}
|
||||
params := database.UpdateAIBridgeInterceptionEndedParams{ID: intcID}
|
||||
intc := testutil.Fake(s.T(), faker, database.AIBridgeInterception{ID: intcID})
|
||||
db.EXPECT().GetAIBridgeInterceptionByID(gomock.Any(), intcID).Return(intc, nil).AnyTimes() // Validation.
|
||||
db.EXPECT().UpdateAIBridgeInterceptionEnded(gomock.Any(), params).Return(intc, nil).AnyTimes()
|
||||
check.Args(params).Asserts(intc, policy.ActionUpdate).Returns(intc)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestTelemetry() {
|
||||
s.Run("InsertTelemetryLock", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().InsertTelemetryLock(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args(database.InsertTelemetryLockParams{}).Asserts(rbac.ResourceSystem, policy.ActionCreate)
|
||||
}))
|
||||
|
||||
s.Run("DeleteOldTelemetryLocks", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().DeleteOldTelemetryLocks(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args(time.Time{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("ListAIBridgeInterceptionsTelemetrySummaries", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().ListAIBridgeInterceptionsTelemetrySummaries(gomock.Any(), gomock.Any()).Return([]database.ListAIBridgeInterceptionsTelemetrySummariesRow{}, nil).AnyTimes()
|
||||
check.Args(database.ListAIBridgeInterceptionsTelemetrySummariesParams{}).Asserts(rbac.ResourceAibridgeInterception, policy.ActionRead)
|
||||
}))
|
||||
|
||||
s.Run("CalculateAIBridgeInterceptionsTelemetrySummary", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
db.EXPECT().CalculateAIBridgeInterceptionsTelemetrySummary(gomock.Any(), gomock.Any()).Return(database.CalculateAIBridgeInterceptionsTelemetrySummaryRow{}, nil).AnyTimes()
|
||||
check.Args(database.CalculateAIBridgeInterceptionsTelemetrySummaryParams{}).Asserts(rbac.ResourceAibridgeInterception, policy.ActionRead)
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@ type WorkspaceResponse struct {
|
||||
Build database.WorkspaceBuild
|
||||
AgentToken string
|
||||
TemplateVersionResponse
|
||||
Task database.Task
|
||||
}
|
||||
|
||||
// WorkspaceBuildBuilder generates workspace builds and associated
|
||||
@@ -57,6 +58,7 @@ type WorkspaceBuildBuilder struct {
|
||||
agentToken string
|
||||
jobStatus database.ProvisionerJobStatus
|
||||
taskAppID uuid.UUID
|
||||
taskSeed database.TaskTable
|
||||
}
|
||||
|
||||
// WorkspaceBuild generates a workspace build for the provided workspace.
|
||||
@@ -115,25 +117,28 @@ func (b WorkspaceBuildBuilder) WithAgent(mutations ...func([]*sdkproto.Agent) []
|
||||
return b
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) WithTask(seed *sdkproto.App) WorkspaceBuildBuilder {
|
||||
if seed == nil {
|
||||
seed = &sdkproto.App{}
|
||||
func (b WorkspaceBuildBuilder) WithTask(taskSeed database.TaskTable, appSeed *sdkproto.App) WorkspaceBuildBuilder {
|
||||
//nolint:revive // returns modified struct
|
||||
b.taskSeed = taskSeed
|
||||
|
||||
if appSeed == nil {
|
||||
appSeed = &sdkproto.App{}
|
||||
}
|
||||
|
||||
var err error
|
||||
//nolint: revive // returns modified struct
|
||||
b.taskAppID, err = uuid.Parse(takeFirst(seed.Id, uuid.NewString()))
|
||||
b.taskAppID, err = uuid.Parse(takeFirst(appSeed.Id, uuid.NewString()))
|
||||
require.NoError(b.t, err)
|
||||
|
||||
return b.Params(database.WorkspaceBuildParameter{
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: "list me",
|
||||
Value: b.taskSeed.Prompt,
|
||||
}).WithAgent(func(a []*sdkproto.Agent) []*sdkproto.Agent {
|
||||
a[0].Apps = []*sdkproto.App{
|
||||
{
|
||||
Id: b.taskAppID.String(),
|
||||
Slug: takeFirst(seed.Slug, "task-app"),
|
||||
Url: takeFirst(seed.Url, ""),
|
||||
Slug: takeFirst(appSeed.Slug, "task-app"),
|
||||
Url: takeFirst(appSeed.Url, ""),
|
||||
},
|
||||
}
|
||||
return a
|
||||
@@ -161,6 +166,19 @@ func (b WorkspaceBuildBuilder) Canceled() WorkspaceBuildBuilder {
|
||||
// Workspace will be optionally populated if no ID is set on the provided
|
||||
// workspace.
|
||||
func (b WorkspaceBuildBuilder) Do() WorkspaceResponse {
|
||||
var resp WorkspaceResponse
|
||||
// Use transaction, like real wsbuilder.
|
||||
err := b.db.InTx(func(tx database.Store) error {
|
||||
//nolint:revive // calls do on modified struct
|
||||
b.db = tx
|
||||
resp = b.doInTX()
|
||||
return nil
|
||||
}, nil)
|
||||
require.NoError(b.t, err)
|
||||
return resp
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
b.t.Helper()
|
||||
jobID := uuid.New()
|
||||
b.seed.ID = uuid.New()
|
||||
@@ -212,6 +230,37 @@ func (b WorkspaceBuildBuilder) Do() WorkspaceResponse {
|
||||
b.seed.WorkspaceID = b.ws.ID
|
||||
b.seed.InitiatorID = takeFirst(b.seed.InitiatorID, b.ws.OwnerID)
|
||||
|
||||
// If a task was requested, ensure it exists and is associated with this
|
||||
// workspace.
|
||||
if b.taskAppID != uuid.Nil {
|
||||
b.logger.Debug(context.Background(), "creating or updating task", "task_id", b.taskSeed.ID)
|
||||
b.taskSeed.OrganizationID = takeFirst(b.taskSeed.OrganizationID, b.ws.OrganizationID)
|
||||
b.taskSeed.OwnerID = takeFirst(b.taskSeed.OwnerID, b.ws.OwnerID)
|
||||
b.taskSeed.Name = takeFirst(b.taskSeed.Name, b.ws.Name)
|
||||
b.taskSeed.WorkspaceID = uuid.NullUUID{UUID: takeFirst(b.taskSeed.WorkspaceID.UUID, b.ws.ID), Valid: true}
|
||||
b.taskSeed.TemplateVersionID = takeFirst(b.taskSeed.TemplateVersionID, b.seed.TemplateVersionID)
|
||||
|
||||
// Try to fetch existing task and update its workspace ID.
|
||||
if task, err := b.db.GetTaskByID(ownerCtx, b.taskSeed.ID); err == nil {
|
||||
if !task.WorkspaceID.Valid {
|
||||
b.logger.Info(context.Background(), "updating task workspace id", "task_id", b.taskSeed.ID, "workspace_id", b.ws.ID)
|
||||
_, err = b.db.UpdateTaskWorkspaceID(ownerCtx, database.UpdateTaskWorkspaceIDParams{
|
||||
ID: b.taskSeed.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: b.ws.ID, Valid: true},
|
||||
})
|
||||
require.NoError(b.t, err, "update task workspace id")
|
||||
} else if task.WorkspaceID.UUID != b.ws.ID {
|
||||
require.Fail(b.t, "task already has a workspace id, mismatch", task.WorkspaceID.UUID, b.ws.ID)
|
||||
}
|
||||
} else if errors.Is(err, sql.ErrNoRows) {
|
||||
task := dbgen.Task(b.t, b.db, b.taskSeed)
|
||||
b.taskSeed.ID = task.ID
|
||||
b.logger.Info(context.Background(), "created new task", "task_id", b.taskSeed.ID)
|
||||
} else {
|
||||
require.NoError(b.t, err, "get task by id")
|
||||
}
|
||||
}
|
||||
|
||||
// Create a provisioner job for the build!
|
||||
payload, err := json.Marshal(provisionerdserver.WorkspaceProvisionJob{
|
||||
WorkspaceBuildID: b.seed.ID,
|
||||
@@ -324,6 +373,11 @@ func (b WorkspaceBuildBuilder) Do() WorkspaceResponse {
|
||||
b.logger.Debug(context.Background(), "linked task to workspace build",
|
||||
slog.F("task_id", task.ID),
|
||||
slog.F("build_number", resp.Build.BuildNumber))
|
||||
|
||||
// Update task after linking.
|
||||
task, err = b.db.GetTaskByID(ownerCtx, task.ID)
|
||||
require.NoError(b.t, err, "get task by id")
|
||||
resp.Task = task
|
||||
}
|
||||
|
||||
for i := range b.params {
|
||||
|
||||
@@ -1495,7 +1495,7 @@ func ClaimPrebuild(
|
||||
return claimedWorkspace
|
||||
}
|
||||
|
||||
func AIBridgeInterception(t testing.TB, db database.Store, seed database.InsertAIBridgeInterceptionParams) database.AIBridgeInterception {
|
||||
func AIBridgeInterception(t testing.TB, db database.Store, seed database.InsertAIBridgeInterceptionParams, endedAt *time.Time) database.AIBridgeInterception {
|
||||
interception, err := db.InsertAIBridgeInterception(genCtx, database.InsertAIBridgeInterceptionParams{
|
||||
ID: takeFirst(seed.ID, uuid.New()),
|
||||
InitiatorID: takeFirst(seed.InitiatorID, uuid.New()),
|
||||
@@ -1504,6 +1504,13 @@ func AIBridgeInterception(t testing.TB, db database.Store, seed database.InsertA
|
||||
Metadata: takeFirstSlice(seed.Metadata, json.RawMessage("{}")),
|
||||
StartedAt: takeFirst(seed.StartedAt, dbtime.Now()),
|
||||
})
|
||||
if endedAt != nil {
|
||||
interception, err = db.UpdateAIBridgeInterceptionEnded(genCtx, database.UpdateAIBridgeInterceptionEndedParams{
|
||||
ID: interception.ID,
|
||||
EndedAt: *endedAt,
|
||||
})
|
||||
require.NoError(t, err, "insert aibridge interception")
|
||||
}
|
||||
require.NoError(t, err, "insert aibridge interception")
|
||||
return interception
|
||||
}
|
||||
@@ -1569,6 +1576,7 @@ func Task(t testing.TB, db database.Store, orig database.TaskTable) database.Tas
|
||||
}
|
||||
|
||||
task, err := db.InsertTask(genCtx, database.InsertTaskParams{
|
||||
ID: takeFirst(orig.ID, uuid.New()),
|
||||
OrganizationID: orig.OrganizationID,
|
||||
OwnerID: orig.OwnerID,
|
||||
Name: takeFirst(orig.Name, taskname.GenerateFallback()),
|
||||
|
||||
@@ -158,6 +158,13 @@ func (m queryMetricsStore) BulkMarkNotificationMessagesSent(ctx context.Context,
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg database.CalculateAIBridgeInterceptionsTelemetrySummaryParams) (database.CalculateAIBridgeInterceptionsTelemetrySummaryRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CalculateAIBridgeInterceptionsTelemetrySummary(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CalculateAIBridgeInterceptionsTelemetrySummary").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ClaimPrebuiltWorkspace(ctx context.Context, arg database.ClaimPrebuiltWorkspaceParams) (database.ClaimPrebuiltWorkspaceRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.ClaimPrebuiltWorkspace(ctx, arg)
|
||||
@@ -403,6 +410,13 @@ func (m queryMetricsStore) DeleteOldProvisionerDaemons(ctx context.Context) erro
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteOldTelemetryLocks(ctx context.Context, periodEndingAtBefore time.Time) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteOldTelemetryLocks(ctx, periodEndingAtBefore)
|
||||
m.queryLatencies.WithLabelValues("DeleteOldTelemetryLocks").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, arg time.Time) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteOldWorkspaceAgentLogs(ctx, arg)
|
||||
@@ -1229,6 +1243,13 @@ func (m queryMetricsStore) GetOrganizationsByUserID(ctx context.Context, userID
|
||||
return organizations, err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetOrganizationsWithPrebuildStatus(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetOrganizationsWithPrebuildStatus").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
|
||||
start := time.Now()
|
||||
schemas, err := m.s.GetParameterSchemasByJobID(ctx, jobID)
|
||||
@@ -2517,6 +2538,13 @@ func (m queryMetricsStore) InsertTelemetryItemIfNotExists(ctx context.Context, a
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertTelemetryLock(ctx context.Context, arg database.InsertTelemetryLockParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.InsertTelemetryLock(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("InsertTelemetryLock").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
|
||||
start := time.Now()
|
||||
err := m.s.InsertTemplate(ctx, arg)
|
||||
@@ -2734,6 +2762,13 @@ func (m queryMetricsStore) ListAIBridgeInterceptions(ctx context.Context, arg da
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg database.ListAIBridgeInterceptionsTelemetrySummariesParams) ([]database.ListAIBridgeInterceptionsTelemetrySummariesRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.ListAIBridgeInterceptionsTelemetrySummaries(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("ListAIBridgeInterceptionsTelemetrySummaries").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ListAIBridgeTokenUsagesByInterceptionIDs(ctx context.Context, interceptionIds []uuid.UUID) ([]database.AIBridgeTokenUsage, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.ListAIBridgeTokenUsagesByInterceptionIDs(ctx, interceptionIds)
|
||||
@@ -2888,6 +2923,13 @@ func (m queryMetricsStore) UnfavoriteWorkspace(ctx context.Context, arg uuid.UUI
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateAIBridgeInterceptionEnded(ctx context.Context, id database.UpdateAIBridgeInterceptionEndedParams) (database.AIBridgeInterception, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpdateAIBridgeInterceptionEnded(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("UpdateAIBridgeInterceptionEnded").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKeyByIDParams) error {
|
||||
start := time.Now()
|
||||
err := m.s.UpdateAPIKeyByID(ctx, arg)
|
||||
@@ -3007,7 +3049,7 @@ func (m queryMetricsStore) UpdateOrganizationDeletedByID(ctx context.Context, ar
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
|
||||
func (m queryMetricsStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpdatePrebuildProvisionerJobWithCancel(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpdatePrebuildProvisionerJobWithCancel").Observe(time.Since(start).Seconds())
|
||||
|
||||
@@ -190,6 +190,21 @@ func (mr *MockStoreMockRecorder) BulkMarkNotificationMessagesSent(ctx, arg any)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkMarkNotificationMessagesSent", reflect.TypeOf((*MockStore)(nil).BulkMarkNotificationMessagesSent), ctx, arg)
|
||||
}
|
||||
|
||||
// CalculateAIBridgeInterceptionsTelemetrySummary mocks base method.
|
||||
func (m *MockStore) CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg database.CalculateAIBridgeInterceptionsTelemetrySummaryParams) (database.CalculateAIBridgeInterceptionsTelemetrySummaryRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CalculateAIBridgeInterceptionsTelemetrySummary", ctx, arg)
|
||||
ret0, _ := ret[0].(database.CalculateAIBridgeInterceptionsTelemetrySummaryRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CalculateAIBridgeInterceptionsTelemetrySummary indicates an expected call of CalculateAIBridgeInterceptionsTelemetrySummary.
|
||||
func (mr *MockStoreMockRecorder) CalculateAIBridgeInterceptionsTelemetrySummary(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateAIBridgeInterceptionsTelemetrySummary", reflect.TypeOf((*MockStore)(nil).CalculateAIBridgeInterceptionsTelemetrySummary), ctx, arg)
|
||||
}
|
||||
|
||||
// ClaimPrebuiltWorkspace mocks base method.
|
||||
func (m *MockStore) ClaimPrebuiltWorkspace(ctx context.Context, arg database.ClaimPrebuiltWorkspaceParams) (database.ClaimPrebuiltWorkspaceRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -736,6 +751,20 @@ func (mr *MockStoreMockRecorder) DeleteOldProvisionerDaemons(ctx any) *gomock.Ca
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldProvisionerDaemons", reflect.TypeOf((*MockStore)(nil).DeleteOldProvisionerDaemons), ctx)
|
||||
}
|
||||
|
||||
// DeleteOldTelemetryLocks mocks base method.
|
||||
func (m *MockStore) DeleteOldTelemetryLocks(ctx context.Context, periodEndingAtBefore time.Time) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteOldTelemetryLocks", ctx, periodEndingAtBefore)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteOldTelemetryLocks indicates an expected call of DeleteOldTelemetryLocks.
|
||||
func (mr *MockStoreMockRecorder) DeleteOldTelemetryLocks(ctx, periodEndingAtBefore any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldTelemetryLocks", reflect.TypeOf((*MockStore)(nil).DeleteOldTelemetryLocks), ctx, periodEndingAtBefore)
|
||||
}
|
||||
|
||||
// DeleteOldWorkspaceAgentLogs mocks base method.
|
||||
func (m *MockStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -2593,6 +2622,21 @@ func (mr *MockStoreMockRecorder) GetOrganizationsByUserID(ctx, arg any) *gomock.
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsByUserID", reflect.TypeOf((*MockStore)(nil).GetOrganizationsByUserID), ctx, arg)
|
||||
}
|
||||
|
||||
// GetOrganizationsWithPrebuildStatus mocks base method.
|
||||
func (m *MockStore) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetOrganizationsWithPrebuildStatus", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.GetOrganizationsWithPrebuildStatusRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetOrganizationsWithPrebuildStatus indicates an expected call of GetOrganizationsWithPrebuildStatus.
|
||||
func (mr *MockStoreMockRecorder) GetOrganizationsWithPrebuildStatus(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsWithPrebuildStatus", reflect.TypeOf((*MockStore)(nil).GetOrganizationsWithPrebuildStatus), ctx, arg)
|
||||
}
|
||||
|
||||
// GetParameterSchemasByJobID mocks base method.
|
||||
func (m *MockStore) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -5392,6 +5436,20 @@ func (mr *MockStoreMockRecorder) InsertTelemetryItemIfNotExists(ctx, arg any) *g
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertTelemetryItemIfNotExists", reflect.TypeOf((*MockStore)(nil).InsertTelemetryItemIfNotExists), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertTelemetryLock mocks base method.
|
||||
func (m *MockStore) InsertTelemetryLock(ctx context.Context, arg database.InsertTelemetryLockParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InsertTelemetryLock", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// InsertTelemetryLock indicates an expected call of InsertTelemetryLock.
|
||||
func (mr *MockStoreMockRecorder) InsertTelemetryLock(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertTelemetryLock", reflect.TypeOf((*MockStore)(nil).InsertTelemetryLock), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertTemplate mocks base method.
|
||||
func (m *MockStore) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -5847,6 +5905,21 @@ func (mr *MockStoreMockRecorder) ListAIBridgeInterceptions(ctx, arg any) *gomock
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAIBridgeInterceptions", reflect.TypeOf((*MockStore)(nil).ListAIBridgeInterceptions), ctx, arg)
|
||||
}
|
||||
|
||||
// ListAIBridgeInterceptionsTelemetrySummaries mocks base method.
|
||||
func (m *MockStore) ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg database.ListAIBridgeInterceptionsTelemetrySummariesParams) ([]database.ListAIBridgeInterceptionsTelemetrySummariesRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ListAIBridgeInterceptionsTelemetrySummaries", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.ListAIBridgeInterceptionsTelemetrySummariesRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ListAIBridgeInterceptionsTelemetrySummaries indicates an expected call of ListAIBridgeInterceptionsTelemetrySummaries.
|
||||
func (mr *MockStoreMockRecorder) ListAIBridgeInterceptionsTelemetrySummaries(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAIBridgeInterceptionsTelemetrySummaries", reflect.TypeOf((*MockStore)(nil).ListAIBridgeInterceptionsTelemetrySummaries), ctx, arg)
|
||||
}
|
||||
|
||||
// ListAIBridgeTokenUsagesByInterceptionIDs mocks base method.
|
||||
func (m *MockStore) ListAIBridgeTokenUsagesByInterceptionIDs(ctx context.Context, interceptionIds []uuid.UUID) ([]database.AIBridgeTokenUsage, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6216,6 +6289,21 @@ func (mr *MockStoreMockRecorder) UnfavoriteWorkspace(ctx, id any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnfavoriteWorkspace", reflect.TypeOf((*MockStore)(nil).UnfavoriteWorkspace), ctx, id)
|
||||
}
|
||||
|
||||
// UpdateAIBridgeInterceptionEnded mocks base method.
|
||||
func (m *MockStore) UpdateAIBridgeInterceptionEnded(ctx context.Context, arg database.UpdateAIBridgeInterceptionEndedParams) (database.AIBridgeInterception, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateAIBridgeInterceptionEnded", ctx, arg)
|
||||
ret0, _ := ret[0].(database.AIBridgeInterception)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpdateAIBridgeInterceptionEnded indicates an expected call of UpdateAIBridgeInterceptionEnded.
|
||||
func (mr *MockStoreMockRecorder) UpdateAIBridgeInterceptionEnded(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAIBridgeInterceptionEnded", reflect.TypeOf((*MockStore)(nil).UpdateAIBridgeInterceptionEnded), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateAPIKeyByID mocks base method.
|
||||
func (m *MockStore) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKeyByIDParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6467,10 +6555,10 @@ func (mr *MockStoreMockRecorder) UpdateOrganizationDeletedByID(ctx, arg any) *go
|
||||
}
|
||||
|
||||
// UpdatePrebuildProvisionerJobWithCancel mocks base method.
|
||||
func (m *MockStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
|
||||
func (m *MockStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdatePrebuildProvisionerJobWithCancel", ctx, arg)
|
||||
ret0, _ := ret[0].([]uuid.UUID)
|
||||
ret0, _ := ret[0].([]database.UpdatePrebuildProvisionerJobWithCancelRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
@@ -24,6 +24,12 @@ const (
|
||||
// but we won't touch the `connection_logs` table.
|
||||
maxAuditLogConnectionEventAge = 90 * 24 * time.Hour // 90 days
|
||||
auditLogConnectionEventBatchSize = 1000
|
||||
// Telemetry heartbeats are used to deduplicate events across replicas. We
|
||||
// don't need to persist heartbeat rows for longer than 24 hours, as they
|
||||
// are only used for deduplication across replicas. The time needs to be
|
||||
// long enough to cover the maximum interval of a heartbeat event (currently
|
||||
// 1 hour) plus some buffer.
|
||||
maxTelemetryHeartbeatAge = 24 * time.Hour
|
||||
)
|
||||
|
||||
// New creates a new periodically purging database instance.
|
||||
@@ -71,6 +77,10 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, clk quartz.
|
||||
if err := tx.ExpirePrebuildsAPIKeys(ctx, dbtime.Time(start)); err != nil {
|
||||
return xerrors.Errorf("failed to expire prebuilds user api keys: %w", err)
|
||||
}
|
||||
deleteOldTelemetryLocksBefore := start.Add(-maxTelemetryHeartbeatAge)
|
||||
if err := tx.DeleteOldTelemetryLocks(ctx, deleteOldTelemetryLocksBefore); err != nil {
|
||||
return xerrors.Errorf("failed to delete old telemetry locks: %w", err)
|
||||
}
|
||||
|
||||
deleteOldAuditLogConnectionEventsBefore := start.Add(-maxAuditLogConnectionEventAge)
|
||||
if err := tx.DeleteOldAuditLogConnectionEvents(ctx, database.DeleteOldAuditLogConnectionEventsParams{
|
||||
|
||||
@@ -704,3 +704,56 @@ func TestExpireOldAPIKeys(t *testing.T) {
|
||||
// Out of an abundance of caution, we do not expire explicitly named prebuilds API keys.
|
||||
assertKeyActive(namedPrebuildsAPIKey.ID)
|
||||
}
|
||||
|
||||
func TestDeleteOldTelemetryHeartbeats(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
db, _, sqlDB := dbtestutil.NewDBWithSQLDB(t, dbtestutil.WithDumpOnFailure())
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
clk := quartz.NewMock(t)
|
||||
now := clk.Now().UTC()
|
||||
|
||||
// Insert telemetry heartbeats.
|
||||
err := db.InsertTelemetryLock(ctx, database.InsertTelemetryLockParams{
|
||||
EventType: "aibridge_interceptions_summary",
|
||||
PeriodEndingAt: now.Add(-25 * time.Hour), // should be purged
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = db.InsertTelemetryLock(ctx, database.InsertTelemetryLockParams{
|
||||
EventType: "aibridge_interceptions_summary",
|
||||
PeriodEndingAt: now.Add(-23 * time.Hour), // should be kept
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = db.InsertTelemetryLock(ctx, database.InsertTelemetryLockParams{
|
||||
EventType: "aibridge_interceptions_summary",
|
||||
PeriodEndingAt: now, // should be kept
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
done := awaitDoTick(ctx, t, clk)
|
||||
closer := dbpurge.New(ctx, logger, db, clk)
|
||||
defer closer.Close()
|
||||
<-done // doTick() has now run.
|
||||
|
||||
require.Eventuallyf(t, func() bool {
|
||||
// We use an SQL queries directly here because we don't expose queries
|
||||
// for deleting heartbeats in the application code.
|
||||
var totalCount int
|
||||
err := sqlDB.QueryRowContext(ctx, `
|
||||
SELECT COUNT(*) FROM telemetry_locks;
|
||||
`).Scan(&totalCount)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var oldCount int
|
||||
err = sqlDB.QueryRowContext(ctx, `
|
||||
SELECT COUNT(*) FROM telemetry_locks WHERE period_ending_at < $1;
|
||||
`, now.Add(-24*time.Hour)).Scan(&oldCount)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Expect 2 heartbeats remaining and none older than 24 hours.
|
||||
t.Logf("eventually: total count: %d, old count: %d", totalCount, oldCount)
|
||||
return totalCount == 2 && oldCount == 0
|
||||
}, testutil.WaitShort, testutil.IntervalFast, "it should delete old telemetry heartbeats")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -45,6 +47,8 @@ func (b *Broker) Create(t TBSubset, opts ...OpenOption) (ConnectionParams, error
|
||||
host = defaultConnectionParams.Host
|
||||
port = defaultConnectionParams.Port
|
||||
)
|
||||
packageName := getTestPackageName(t)
|
||||
testName := t.Name()
|
||||
|
||||
// Use a time-based prefix to make it easier to find the database
|
||||
// when debugging.
|
||||
@@ -55,9 +59,9 @@ func (b *Broker) Create(t TBSubset, opts ...OpenOption) (ConnectionParams, error
|
||||
}
|
||||
dbName := now + "_" + dbSuffix
|
||||
|
||||
// TODO: add package and test name
|
||||
_, err = b.coderTestingDB.Exec(
|
||||
"INSERT INTO test_databases (name, process_uuid) VALUES ($1, $2)", dbName, b.uuid)
|
||||
"INSERT INTO test_databases (name, process_uuid, test_package, test_name) VALUES ($1, $2, $3, $4)",
|
||||
dbName, b.uuid, packageName, testName)
|
||||
if err != nil {
|
||||
return ConnectionParams{}, xerrors.Errorf("insert test_database row: %w", err)
|
||||
}
|
||||
@@ -104,10 +108,10 @@ func (b *Broker) clean(t TBSubset, dbName string) func() {
|
||||
func (b *Broker) init(t TBSubset) error {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
b.refCount++
|
||||
t.Cleanup(b.decRef)
|
||||
if b.coderTestingDB != nil {
|
||||
// already initialized
|
||||
b.refCount++
|
||||
t.Cleanup(b.decRef)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -124,8 +128,8 @@ func (b *Broker) init(t TBSubset) error {
|
||||
return xerrors.Errorf("open postgres connection: %w", err)
|
||||
}
|
||||
|
||||
// creating the db can succeed even if the database doesn't exist. Ping it to find out.
|
||||
err = coderTestingDB.Ping()
|
||||
// coderTestingSQLInit is idempotent, so we can run it every time.
|
||||
_, err = coderTestingDB.Exec(coderTestingSQLInit)
|
||||
var pqErr *pq.Error
|
||||
if xerrors.As(err, &pqErr) && pqErr.Code == "3D000" {
|
||||
// database does not exist.
|
||||
@@ -145,6 +149,8 @@ func (b *Broker) init(t TBSubset) error {
|
||||
return xerrors.Errorf("ping '%s' database: %w", CoderTestingDBName, err)
|
||||
}
|
||||
b.coderTestingDB = coderTestingDB
|
||||
b.refCount++
|
||||
t.Cleanup(b.decRef)
|
||||
|
||||
if b.uuid == uuid.Nil {
|
||||
b.uuid = uuid.New()
|
||||
@@ -186,3 +192,42 @@ func (b *Broker) decRef() {
|
||||
b.coderTestingDB = nil
|
||||
}
|
||||
}
|
||||
|
||||
// getTestPackageName returns the package name of the test that called it.
|
||||
func getTestPackageName(t TBSubset) string {
|
||||
packageName := "unknown"
|
||||
// Ask runtime.Callers for up to 100 program counters, including runtime.Callers itself.
|
||||
pc := make([]uintptr, 100)
|
||||
n := runtime.Callers(0, pc)
|
||||
if n == 0 {
|
||||
// No PCs available. This can happen if the first argument to
|
||||
// runtime.Callers is large.
|
||||
//
|
||||
// Return now to avoid processing the zero Frame that would
|
||||
// otherwise be returned by frames.Next below.
|
||||
t.Logf("could not determine test package name: no PCs available")
|
||||
return packageName
|
||||
}
|
||||
|
||||
pc = pc[:n] // pass only valid pcs to runtime.CallersFrames
|
||||
frames := runtime.CallersFrames(pc)
|
||||
|
||||
// Loop to get frames.
|
||||
// A fixed number of PCs can expand to an indefinite number of Frames.
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
|
||||
if strings.HasPrefix(frame.Function, "github.com/coder/coder/v2/") {
|
||||
packageName = strings.SplitN(strings.TrimPrefix(frame.Function, "github.com/coder/coder/v2/"), ".", 2)[0]
|
||||
}
|
||||
if strings.HasPrefix(frame.Function, "testing") {
|
||||
break
|
||||
}
|
||||
|
||||
// Check whether there are more frames to process after this one.
|
||||
if !more {
|
||||
break
|
||||
}
|
||||
}
|
||||
return packageName
|
||||
}
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
package dbtestutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetTestPackageName(t *testing.T) {
|
||||
t.Parallel()
|
||||
packageName := getTestPackageName(t)
|
||||
require.Equal(t, "coderd/database/dbtestutil", packageName)
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
BEGIN TRANSACTION;
|
||||
SELECT pg_advisory_xact_lock(7283699);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS test_databases (
|
||||
name text PRIMARY KEY,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
@@ -6,3 +9,10 @@ CREATE TABLE IF NOT EXISTS test_databases (
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS test_databases_process_uuid ON test_databases (process_uuid, dropped_at);
|
||||
|
||||
ALTER TABLE test_databases ADD COLUMN IF NOT EXISTS test_name text;
|
||||
COMMENT ON COLUMN test_databases.test_name IS 'Name of the test that created the database';
|
||||
ALTER TABLE test_databases ADD COLUMN IF NOT EXISTS test_package text;
|
||||
COMMENT ON COLUMN test_databases.test_package IS 'Package of the test that created the database';
|
||||
|
||||
COMMIT;
|
||||
|
||||
Generated
+41
-14
@@ -1828,6 +1828,15 @@ CREATE TABLE tasks (
|
||||
deleted_at timestamp with time zone
|
||||
);
|
||||
|
||||
CREATE VIEW visible_users AS
|
||||
SELECT users.id,
|
||||
users.username,
|
||||
users.name,
|
||||
users.avatar_url
|
||||
FROM users;
|
||||
|
||||
COMMENT ON VIEW visible_users IS 'Visible fields of users are allowed to be joined with other tables for including context of other resources.';
|
||||
|
||||
CREATE TABLE workspace_agents (
|
||||
id uuid NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
@@ -1978,8 +1987,16 @@ CREATE VIEW tasks_with_status AS
|
||||
END AS status,
|
||||
task_app.workspace_build_number,
|
||||
task_app.workspace_agent_id,
|
||||
task_app.workspace_app_id
|
||||
FROM ((((tasks
|
||||
task_app.workspace_app_id,
|
||||
task_owner.owner_username,
|
||||
task_owner.owner_name,
|
||||
task_owner.owner_avatar_url
|
||||
FROM (((((tasks
|
||||
CROSS JOIN LATERAL ( SELECT vu.username AS owner_username,
|
||||
vu.name AS owner_name,
|
||||
vu.avatar_url AS owner_avatar_url
|
||||
FROM visible_users vu
|
||||
WHERE (vu.id = tasks.owner_id)) task_owner)
|
||||
LEFT JOIN LATERAL ( SELECT task_app_1.workspace_build_number,
|
||||
task_app_1.workspace_agent_id,
|
||||
task_app_1.workspace_app_id
|
||||
@@ -2012,6 +2029,18 @@ CREATE TABLE telemetry_items (
|
||||
updated_at timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE telemetry_locks (
|
||||
event_type text NOT NULL,
|
||||
period_ending_at timestamp with time zone NOT NULL,
|
||||
CONSTRAINT telemetry_lock_event_type_constraint CHECK ((event_type = 'aibridge_interceptions_summary'::text))
|
||||
);
|
||||
|
||||
COMMENT ON TABLE telemetry_locks IS 'Telemetry lock tracking table for deduplication of heartbeat events across replicas.';
|
||||
|
||||
COMMENT ON COLUMN telemetry_locks.event_type IS 'The type of event that was sent.';
|
||||
|
||||
COMMENT ON COLUMN telemetry_locks.period_ending_at IS 'The heartbeat period end timestamp.';
|
||||
|
||||
CREATE TABLE template_usage_stats (
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
end_time timestamp with time zone NOT NULL,
|
||||
@@ -2198,15 +2227,6 @@ COMMENT ON COLUMN template_versions.external_auth_providers IS 'IDs of External
|
||||
|
||||
COMMENT ON COLUMN template_versions.message IS 'Message describing the changes in this version of the template, similar to a Git commit message. Like a commit message, this should be a short, high-level description of the changes in this version of the template. This message is immutable and should not be updated after the fact.';
|
||||
|
||||
CREATE VIEW visible_users AS
|
||||
SELECT users.id,
|
||||
users.username,
|
||||
users.name,
|
||||
users.avatar_url
|
||||
FROM users;
|
||||
|
||||
COMMENT ON VIEW visible_users IS 'Visible fields of users are allowed to be joined with other tables for including context of other resources.';
|
||||
|
||||
CREATE VIEW template_version_with_user AS
|
||||
SELECT template_versions.id,
|
||||
template_versions.template_id,
|
||||
@@ -2902,11 +2922,13 @@ CREATE VIEW workspaces_expanded AS
|
||||
templates.name AS template_name,
|
||||
templates.display_name AS template_display_name,
|
||||
templates.icon AS template_icon,
|
||||
templates.description AS template_description
|
||||
FROM (((workspaces
|
||||
templates.description AS template_description,
|
||||
tasks.id AS task_id
|
||||
FROM ((((workspaces
|
||||
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
|
||||
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
|
||||
JOIN templates ON ((workspaces.template_id = templates.id)));
|
||||
JOIN templates ON ((workspaces.template_id = templates.id)))
|
||||
LEFT JOIN tasks ON ((workspaces.id = tasks.workspace_id)));
|
||||
|
||||
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
|
||||
@@ -3090,6 +3112,9 @@ ALTER TABLE ONLY tasks
|
||||
ALTER TABLE ONLY telemetry_items
|
||||
ADD CONSTRAINT telemetry_items_pkey PRIMARY KEY (key);
|
||||
|
||||
ALTER TABLE ONLY telemetry_locks
|
||||
ADD CONSTRAINT telemetry_locks_pkey PRIMARY KEY (event_type, period_ending_at);
|
||||
|
||||
ALTER TABLE ONLY template_usage_stats
|
||||
ADD CONSTRAINT template_usage_stats_pkey PRIMARY KEY (start_time, template_id, user_id);
|
||||
|
||||
@@ -3315,6 +3340,8 @@ CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id);
|
||||
|
||||
CREATE INDEX idx_tailnet_tunnels_src_id ON tailnet_tunnels USING hash (src_id);
|
||||
|
||||
CREATE INDEX idx_telemetry_locks_period_ending_at ON telemetry_locks USING btree (period_ending_at);
|
||||
|
||||
CREATE UNIQUE INDEX idx_template_version_presets_default ON template_version_presets USING btree (template_version_id) WHERE (is_default = true);
|
||||
|
||||
CREATE INDEX idx_template_versions_has_ai_task ON template_versions USING btree (has_ai_task);
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
DROP TABLE telemetry_locks;
|
||||
@@ -0,0 +1,12 @@
|
||||
CREATE TABLE telemetry_locks (
|
||||
event_type TEXT NOT NULL CONSTRAINT telemetry_lock_event_type_constraint CHECK (event_type IN ('aibridge_interceptions_summary')),
|
||||
period_ending_at TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
|
||||
PRIMARY KEY (event_type, period_ending_at)
|
||||
);
|
||||
|
||||
COMMENT ON TABLE telemetry_locks IS 'Telemetry lock tracking table for deduplication of heartbeat events across replicas.';
|
||||
COMMENT ON COLUMN telemetry_locks.event_type IS 'The type of event that was sent.';
|
||||
COMMENT ON COLUMN telemetry_locks.period_ending_at IS 'The heartbeat period end timestamp.';
|
||||
|
||||
CREATE INDEX idx_telemetry_locks_period_ending_at ON telemetry_locks (period_ending_at);
|
||||
@@ -0,0 +1,74 @@
|
||||
-- Drop view from 000390_tasks_with_status_user_fields.up.sql.
|
||||
DROP VIEW IF EXISTS tasks_with_status;
|
||||
|
||||
-- Restore from 000382_add_columns_to_tasks_with_status.up.sql.
|
||||
CREATE VIEW
|
||||
tasks_with_status
|
||||
AS
|
||||
SELECT
|
||||
tasks.*,
|
||||
CASE
|
||||
WHEN tasks.workspace_id IS NULL OR latest_build.job_status IS NULL THEN 'pending'::task_status
|
||||
|
||||
WHEN latest_build.job_status = 'failed' THEN 'error'::task_status
|
||||
|
||||
WHEN latest_build.transition IN ('stop', 'delete')
|
||||
AND latest_build.job_status = 'succeeded' THEN 'paused'::task_status
|
||||
|
||||
WHEN latest_build.transition = 'start'
|
||||
AND latest_build.job_status = 'pending' THEN 'initializing'::task_status
|
||||
|
||||
WHEN latest_build.transition = 'start' AND latest_build.job_status IN ('running', 'succeeded') THEN
|
||||
CASE
|
||||
WHEN agent_status.none THEN 'initializing'::task_status
|
||||
WHEN agent_status.connecting THEN 'initializing'::task_status
|
||||
WHEN agent_status.connected THEN
|
||||
CASE
|
||||
WHEN app_status.any_unhealthy THEN 'error'::task_status
|
||||
WHEN app_status.any_initializing THEN 'initializing'::task_status
|
||||
WHEN app_status.all_healthy_or_disabled THEN 'active'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END
|
||||
ELSE 'unknown'::task_status
|
||||
END
|
||||
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status,
|
||||
task_app.*
|
||||
FROM
|
||||
tasks
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT workspace_build_number, workspace_agent_id, workspace_app_id
|
||||
FROM task_workspace_apps task_app
|
||||
WHERE task_id = tasks.id
|
||||
ORDER BY workspace_build_number DESC
|
||||
LIMIT 1
|
||||
) task_app ON TRUE
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
workspace_build.transition,
|
||||
provisioner_job.job_status,
|
||||
workspace_build.job_id
|
||||
FROM workspace_builds workspace_build
|
||||
JOIN provisioner_jobs provisioner_job ON provisioner_job.id = workspace_build.job_id
|
||||
WHERE workspace_build.workspace_id = tasks.workspace_id
|
||||
AND workspace_build.build_number = task_app.workspace_build_number
|
||||
) latest_build ON TRUE
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
COUNT(*) = 0 AS none,
|
||||
bool_or(workspace_agent.lifecycle_state IN ('created', 'starting')) AS connecting,
|
||||
bool_and(workspace_agent.lifecycle_state = 'ready') AS connected
|
||||
FROM workspace_agents workspace_agent
|
||||
WHERE workspace_agent.id = task_app.workspace_agent_id
|
||||
) agent_status
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
bool_or(workspace_app.health = 'unhealthy') AS any_unhealthy,
|
||||
bool_or(workspace_app.health = 'initializing') AS any_initializing,
|
||||
bool_and(workspace_app.health IN ('healthy', 'disabled')) AS all_healthy_or_disabled
|
||||
FROM workspace_apps workspace_app
|
||||
WHERE workspace_app.id = task_app.workspace_app_id
|
||||
) app_status
|
||||
WHERE
|
||||
tasks.deleted_at IS NULL;
|
||||
@@ -0,0 +1,84 @@
|
||||
-- Drop view from 00037_add_columns_to_tasks_with_status.up.sql.
|
||||
DROP VIEW IF EXISTS tasks_with_status;
|
||||
|
||||
-- Add owner_name, owner_avatar_url columns.
|
||||
CREATE VIEW
|
||||
tasks_with_status
|
||||
AS
|
||||
SELECT
|
||||
tasks.*,
|
||||
CASE
|
||||
WHEN tasks.workspace_id IS NULL OR latest_build.job_status IS NULL THEN 'pending'::task_status
|
||||
|
||||
WHEN latest_build.job_status = 'failed' THEN 'error'::task_status
|
||||
|
||||
WHEN latest_build.transition IN ('stop', 'delete')
|
||||
AND latest_build.job_status = 'succeeded' THEN 'paused'::task_status
|
||||
|
||||
WHEN latest_build.transition = 'start'
|
||||
AND latest_build.job_status = 'pending' THEN 'initializing'::task_status
|
||||
|
||||
WHEN latest_build.transition = 'start' AND latest_build.job_status IN ('running', 'succeeded') THEN
|
||||
CASE
|
||||
WHEN agent_status.none THEN 'initializing'::task_status
|
||||
WHEN agent_status.connecting THEN 'initializing'::task_status
|
||||
WHEN agent_status.connected THEN
|
||||
CASE
|
||||
WHEN app_status.any_unhealthy THEN 'error'::task_status
|
||||
WHEN app_status.any_initializing THEN 'initializing'::task_status
|
||||
WHEN app_status.all_healthy_or_disabled THEN 'active'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END
|
||||
ELSE 'unknown'::task_status
|
||||
END
|
||||
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status,
|
||||
task_app.*,
|
||||
task_owner.*
|
||||
FROM
|
||||
tasks
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
vu.username AS owner_username,
|
||||
vu.name AS owner_name,
|
||||
vu.avatar_url AS owner_avatar_url
|
||||
FROM visible_users vu
|
||||
WHERE vu.id = tasks.owner_id
|
||||
) task_owner
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT workspace_build_number, workspace_agent_id, workspace_app_id
|
||||
FROM task_workspace_apps task_app
|
||||
WHERE task_id = tasks.id
|
||||
ORDER BY workspace_build_number DESC
|
||||
LIMIT 1
|
||||
) task_app ON TRUE
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
workspace_build.transition,
|
||||
provisioner_job.job_status,
|
||||
workspace_build.job_id
|
||||
FROM workspace_builds workspace_build
|
||||
JOIN provisioner_jobs provisioner_job ON provisioner_job.id = workspace_build.job_id
|
||||
WHERE workspace_build.workspace_id = tasks.workspace_id
|
||||
AND workspace_build.build_number = task_app.workspace_build_number
|
||||
) latest_build ON TRUE
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
COUNT(*) = 0 AS none,
|
||||
bool_or(workspace_agent.lifecycle_state IN ('created', 'starting')) AS connecting,
|
||||
bool_and(workspace_agent.lifecycle_state = 'ready') AS connected
|
||||
FROM workspace_agents workspace_agent
|
||||
WHERE workspace_agent.id = task_app.workspace_agent_id
|
||||
) agent_status
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
bool_or(workspace_app.health = 'unhealthy') AS any_unhealthy,
|
||||
bool_or(workspace_app.health = 'initializing') AS any_initializing,
|
||||
bool_and(workspace_app.health IN ('healthy', 'disabled')) AS all_healthy_or_disabled
|
||||
FROM workspace_apps workspace_app
|
||||
WHERE workspace_app.id = task_app.workspace_app_id
|
||||
) app_status
|
||||
WHERE
|
||||
tasks.deleted_at IS NULL;
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
UPDATE notification_templates
|
||||
SET enabled_by_default = true
|
||||
WHERE id IN (
|
||||
'8c5a4d12-9f7e-4b3a-a1c8-6e4f2d9b5a7c',
|
||||
'3b7e8f1a-4c2d-49a6-b5e9-7f3a1c8d6b4e',
|
||||
'bd4b7168-d05e-4e19-ad0f-3593b77aa90f',
|
||||
'd4a6271c-cced-4ed0-84ad-afd02a9c7799'
|
||||
);
|
||||
@@ -0,0 +1,8 @@
|
||||
UPDATE notification_templates
|
||||
SET enabled_by_default = false
|
||||
WHERE id IN (
|
||||
'8c5a4d12-9f7e-4b3a-a1c8-6e4f2d9b5a7c',
|
||||
'3b7e8f1a-4c2d-49a6-b5e9-7f3a1c8d6b4e',
|
||||
'bd4b7168-d05e-4e19-ad0f-3593b77aa90f',
|
||||
'd4a6271c-cced-4ed0-84ad-afd02a9c7799'
|
||||
);
|
||||
@@ -0,0 +1,39 @@
|
||||
DROP VIEW workspaces_expanded;
|
||||
|
||||
-- Recreate the view from 000354_workspace_acl.up.sql
|
||||
CREATE VIEW workspaces_expanded AS
|
||||
SELECT workspaces.id,
|
||||
workspaces.created_at,
|
||||
workspaces.updated_at,
|
||||
workspaces.owner_id,
|
||||
workspaces.organization_id,
|
||||
workspaces.template_id,
|
||||
workspaces.deleted,
|
||||
workspaces.name,
|
||||
workspaces.autostart_schedule,
|
||||
workspaces.ttl,
|
||||
workspaces.last_used_at,
|
||||
workspaces.dormant_at,
|
||||
workspaces.deleting_at,
|
||||
workspaces.automatic_updates,
|
||||
workspaces.favorite,
|
||||
workspaces.next_start_at,
|
||||
workspaces.group_acl,
|
||||
workspaces.user_acl,
|
||||
visible_users.avatar_url AS owner_avatar_url,
|
||||
visible_users.username AS owner_username,
|
||||
visible_users.name AS owner_name,
|
||||
organizations.name AS organization_name,
|
||||
organizations.display_name AS organization_display_name,
|
||||
organizations.icon AS organization_icon,
|
||||
organizations.description AS organization_description,
|
||||
templates.name AS template_name,
|
||||
templates.display_name AS template_display_name,
|
||||
templates.icon AS template_icon,
|
||||
templates.description AS template_description
|
||||
FROM (((workspaces
|
||||
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
|
||||
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
|
||||
JOIN templates ON ((workspaces.template_id = templates.id)));
|
||||
|
||||
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
@@ -0,0 +1,42 @@
|
||||
DROP VIEW workspaces_expanded;
|
||||
|
||||
-- Add nullable task_id to workspaces_expanded view
|
||||
CREATE VIEW workspaces_expanded AS
|
||||
SELECT workspaces.id,
|
||||
workspaces.created_at,
|
||||
workspaces.updated_at,
|
||||
workspaces.owner_id,
|
||||
workspaces.organization_id,
|
||||
workspaces.template_id,
|
||||
workspaces.deleted,
|
||||
workspaces.name,
|
||||
workspaces.autostart_schedule,
|
||||
workspaces.ttl,
|
||||
workspaces.last_used_at,
|
||||
workspaces.dormant_at,
|
||||
workspaces.deleting_at,
|
||||
workspaces.automatic_updates,
|
||||
workspaces.favorite,
|
||||
workspaces.next_start_at,
|
||||
workspaces.group_acl,
|
||||
workspaces.user_acl,
|
||||
visible_users.avatar_url AS owner_avatar_url,
|
||||
visible_users.username AS owner_username,
|
||||
visible_users.name AS owner_name,
|
||||
organizations.name AS organization_name,
|
||||
organizations.display_name AS organization_display_name,
|
||||
organizations.icon AS organization_icon,
|
||||
organizations.description AS organization_description,
|
||||
templates.name AS template_name,
|
||||
templates.display_name AS template_display_name,
|
||||
templates.icon AS template_icon,
|
||||
templates.description AS template_description,
|
||||
tasks.id AS task_id
|
||||
FROM ((((workspaces
|
||||
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
|
||||
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
|
||||
JOIN templates ON ((workspaces.template_id = templates.id)))
|
||||
LEFT JOIN tasks ON ((workspaces.id = tasks.workspace_id)));
|
||||
|
||||
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
INSERT INTO telemetry_locks (
|
||||
event_type,
|
||||
period_ending_at
|
||||
)
|
||||
VALUES (
|
||||
'aibridge_interceptions_summary',
|
||||
'2025-01-01 00:00:00+00'::timestamptz
|
||||
);
|
||||
@@ -208,6 +208,7 @@ func (s APIKeyScopes) expandRBACScope() (rbac.Scope, error) {
|
||||
for orgID, perms := range expanded.ByOrgID {
|
||||
orgPerms := merged.ByOrgID[orgID]
|
||||
orgPerms.Org = append(orgPerms.Org, perms.Org...)
|
||||
orgPerms.Member = append(orgPerms.Member, perms.Member...)
|
||||
merged.ByOrgID[orgID] = orgPerms
|
||||
}
|
||||
merged.User = append(merged.User, expanded.User...)
|
||||
@@ -220,6 +221,7 @@ func (s APIKeyScopes) expandRBACScope() (rbac.Scope, error) {
|
||||
merged.User = rbac.DeduplicatePermissions(merged.User)
|
||||
for orgID, perms := range merged.ByOrgID {
|
||||
perms.Org = rbac.DeduplicatePermissions(perms.Org)
|
||||
perms.Member = rbac.DeduplicatePermissions(perms.Member)
|
||||
merged.ByOrgID[orgID] = perms
|
||||
}
|
||||
|
||||
|
||||
@@ -321,6 +321,7 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
&i.TemplateVersionID,
|
||||
&i.TemplateVersionName,
|
||||
&i.LatestBuildCompletedAt,
|
||||
@@ -328,7 +329,6 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa
|
||||
&i.LatestBuildError,
|
||||
&i.LatestBuildTransition,
|
||||
&i.LatestBuildStatus,
|
||||
&i.LatestBuildHasAITask,
|
||||
&i.LatestBuildHasExternalAgent,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// sqlc v1.30.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -4221,6 +4221,9 @@ type Task struct {
|
||||
WorkspaceBuildNumber sql.NullInt32 `db:"workspace_build_number" json:"workspace_build_number"`
|
||||
WorkspaceAgentID uuid.NullUUID `db:"workspace_agent_id" json:"workspace_agent_id"`
|
||||
WorkspaceAppID uuid.NullUUID `db:"workspace_app_id" json:"workspace_app_id"`
|
||||
OwnerUsername string `db:"owner_username" json:"owner_username"`
|
||||
OwnerName string `db:"owner_name" json:"owner_name"`
|
||||
OwnerAvatarUrl string `db:"owner_avatar_url" json:"owner_avatar_url"`
|
||||
}
|
||||
|
||||
type TaskTable struct {
|
||||
@@ -4250,6 +4253,14 @@ type TelemetryItem struct {
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
}
|
||||
|
||||
// Telemetry lock tracking table for deduplication of heartbeat events across replicas.
|
||||
type TelemetryLock struct {
|
||||
// The type of event that was sent.
|
||||
EventType string `db:"event_type" json:"event_type"`
|
||||
// The heartbeat period end timestamp.
|
||||
PeriodEndingAt time.Time `db:"period_ending_at" json:"period_ending_at"`
|
||||
}
|
||||
|
||||
// Joins in the display name information such as username, avatar, and organization name.
|
||||
type Template struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
@@ -4652,6 +4663,7 @@ type Workspace struct {
|
||||
TemplateDisplayName string `db:"template_display_name" json:"template_display_name"`
|
||||
TemplateIcon string `db:"template_icon" json:"template_icon"`
|
||||
TemplateDescription string `db:"template_description" json:"template_description"`
|
||||
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
|
||||
}
|
||||
|
||||
type WorkspaceAgent struct {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// sqlc v1.30.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -60,6 +60,9 @@ type sqlcQuerier interface {
|
||||
BatchUpdateWorkspaceNextStartAt(ctx context.Context, arg BatchUpdateWorkspaceNextStartAtParams) error
|
||||
BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error)
|
||||
BulkMarkNotificationMessagesSent(ctx context.Context, arg BulkMarkNotificationMessagesSentParams) (int64, error)
|
||||
// Calculates the telemetry summary for a given provider, model, and client
|
||||
// combination for telemetry reporting.
|
||||
CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg CalculateAIBridgeInterceptionsTelemetrySummaryParams) (CalculateAIBridgeInterceptionsTelemetrySummaryRow, error)
|
||||
ClaimPrebuiltWorkspace(ctx context.Context, arg ClaimPrebuiltWorkspaceParams) (ClaimPrebuiltWorkspaceRow, error)
|
||||
CleanTailnetCoordinators(ctx context.Context) error
|
||||
CleanTailnetLostPeers(ctx context.Context) error
|
||||
@@ -107,6 +110,8 @@ type sqlcQuerier interface {
|
||||
// A provisioner daemon with "zeroed" last_seen_at column indicates possible
|
||||
// connectivity issues (no provisioner daemon activity since registration).
|
||||
DeleteOldProvisionerDaemons(ctx context.Context) error
|
||||
// Deletes old telemetry locks from the telemetry_locks table.
|
||||
DeleteOldTelemetryLocks(ctx context.Context, periodEndingAtBefore time.Time) error
|
||||
// If an agent hasn't connected in the last 7 days, we purge it's logs.
|
||||
// Exception: if the logs are related to the latest build, we keep those around.
|
||||
// Logs can take up a lot of space, so it's important we clean up frequently.
|
||||
@@ -264,6 +269,9 @@ type sqlcQuerier interface {
|
||||
GetOrganizationResourceCountByID(ctx context.Context, organizationID uuid.UUID) (GetOrganizationResourceCountByIDRow, error)
|
||||
GetOrganizations(ctx context.Context, arg GetOrganizationsParams) ([]Organization, error)
|
||||
GetOrganizationsByUserID(ctx context.Context, arg GetOrganizationsByUserIDParams) ([]Organization, error)
|
||||
// GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
|
||||
// membership status for the prebuilds system user (org membership, group existence, group membership).
|
||||
GetOrganizationsWithPrebuildStatus(ctx context.Context, arg GetOrganizationsWithPrebuildStatusParams) ([]GetOrganizationsWithPrebuildStatusRow, error)
|
||||
GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]ParameterSchema, error)
|
||||
GetPrebuildMetrics(ctx context.Context) ([]GetPrebuildMetricsRow, error)
|
||||
GetPrebuildsSettings(ctx context.Context) (string, error)
|
||||
@@ -559,6 +567,12 @@ type sqlcQuerier interface {
|
||||
InsertReplica(ctx context.Context, arg InsertReplicaParams) (Replica, error)
|
||||
InsertTask(ctx context.Context, arg InsertTaskParams) (TaskTable, error)
|
||||
InsertTelemetryItemIfNotExists(ctx context.Context, arg InsertTelemetryItemIfNotExistsParams) error
|
||||
// Inserts a new lock row into the telemetry_locks table. Replicas should call
|
||||
// this function prior to attempting to generate or publish a heartbeat event to
|
||||
// the telemetry service.
|
||||
// If the query returns a duplicate primary key error, the replica should not
|
||||
// attempt to generate or publish the event to the telemetry service.
|
||||
InsertTelemetryLock(ctx context.Context, arg InsertTelemetryLockParams) error
|
||||
InsertTemplate(ctx context.Context, arg InsertTemplateParams) error
|
||||
InsertTemplateVersion(ctx context.Context, arg InsertTemplateVersionParams) error
|
||||
InsertTemplateVersionParameter(ctx context.Context, arg InsertTemplateVersionParameterParams) (TemplateVersionParameter, error)
|
||||
@@ -595,6 +609,9 @@ type sqlcQuerier interface {
|
||||
InsertWorkspaceResource(ctx context.Context, arg InsertWorkspaceResourceParams) (WorkspaceResource, error)
|
||||
InsertWorkspaceResourceMetadata(ctx context.Context, arg InsertWorkspaceResourceMetadataParams) ([]WorkspaceResourceMetadatum, error)
|
||||
ListAIBridgeInterceptions(ctx context.Context, arg ListAIBridgeInterceptionsParams) ([]ListAIBridgeInterceptionsRow, error)
|
||||
// Finds all unique AIBridge interception telemetry summaries combinations
|
||||
// (provider, model, client) in the given timeframe for telemetry reporting.
|
||||
ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg ListAIBridgeInterceptionsTelemetrySummariesParams) ([]ListAIBridgeInterceptionsTelemetrySummariesRow, error)
|
||||
ListAIBridgeTokenUsagesByInterceptionIDs(ctx context.Context, interceptionIds []uuid.UUID) ([]AIBridgeTokenUsage, error)
|
||||
ListAIBridgeToolUsagesByInterceptionIDs(ctx context.Context, interceptionIds []uuid.UUID) ([]AIBridgeToolUsage, error)
|
||||
ListAIBridgeUserPromptsByInterceptionIDs(ctx context.Context, interceptionIds []uuid.UUID) ([]AIBridgeUserPrompt, error)
|
||||
@@ -632,6 +649,7 @@ type sqlcQuerier interface {
|
||||
// This will always work regardless of the current state of the template version.
|
||||
UnarchiveTemplateVersion(ctx context.Context, arg UnarchiveTemplateVersionParams) error
|
||||
UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error
|
||||
UpdateAIBridgeInterceptionEnded(ctx context.Context, arg UpdateAIBridgeInterceptionEndedParams) (AIBridgeInterception, error)
|
||||
UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDParams) error
|
||||
UpdateCryptoKeyDeletesAt(ctx context.Context, arg UpdateCryptoKeyDeletesAtParams) (CryptoKey, error)
|
||||
UpdateCustomRole(ctx context.Context, arg UpdateCustomRoleParams) (CustomRole, error)
|
||||
@@ -652,7 +670,7 @@ type sqlcQuerier interface {
|
||||
// Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
|
||||
// inactive template version.
|
||||
// This is an optimization to clean up stale pending jobs.
|
||||
UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error)
|
||||
UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]UpdatePrebuildProvisionerJobWithCancelRow, error)
|
||||
UpdatePresetPrebuildStatus(ctx context.Context, arg UpdatePresetPrebuildStatusParams) error
|
||||
UpdateProvisionerDaemonLastSeenAt(ctx context.Context, arg UpdateProvisionerDaemonLastSeenAtParams) error
|
||||
UpdateProvisionerJobByID(ctx context.Context, arg UpdateProvisionerJobByIDParams) error
|
||||
|
||||
@@ -7248,7 +7248,9 @@ func TestTaskNameUniqueness(t *testing.T) {
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
taskID := uuid.New()
|
||||
task, err := db.InsertTask(ctx, database.InsertTaskParams{
|
||||
ID: taskID,
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: tt.ownerID,
|
||||
Name: tt.taskName,
|
||||
@@ -7263,6 +7265,7 @@ func TestTaskNameUniqueness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, uuid.Nil, task.ID)
|
||||
require.NotEqual(t, task1.ID, task.ID)
|
||||
require.Equal(t, taskID, task.ID)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -7724,3 +7727,68 @@ func TestUpdateTaskWorkspaceID(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateAIBridgeInterceptionEnded(t *testing.T) {
|
||||
t.Parallel()
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
|
||||
t.Run("NonExistingInterception", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
got, err := db.UpdateAIBridgeInterceptionEnded(ctx, database.UpdateAIBridgeInterceptionEndedParams{
|
||||
ID: uuid.New(),
|
||||
EndedAt: time.Now(),
|
||||
})
|
||||
require.ErrorContains(t, err, "no rows in result set")
|
||||
require.EqualValues(t, database.AIBridgeInterception{}, got)
|
||||
})
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
interceptions := []database.AIBridgeInterception{}
|
||||
|
||||
for _, uid := range []uuid.UUID{{1}, {2}, {3}} {
|
||||
insertParams := database.InsertAIBridgeInterceptionParams{
|
||||
ID: uid,
|
||||
InitiatorID: user.ID,
|
||||
Metadata: json.RawMessage("{}"),
|
||||
}
|
||||
|
||||
intc, err := db.InsertAIBridgeInterception(ctx, insertParams)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uid, intc.ID)
|
||||
require.False(t, intc.EndedAt.Valid)
|
||||
interceptions = append(interceptions, intc)
|
||||
}
|
||||
|
||||
intc0 := interceptions[0]
|
||||
endedAt := time.Now()
|
||||
// Mark first interception as done
|
||||
updated, err := db.UpdateAIBridgeInterceptionEnded(ctx, database.UpdateAIBridgeInterceptionEndedParams{
|
||||
ID: intc0.ID,
|
||||
EndedAt: endedAt,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, updated.ID, intc0.ID)
|
||||
require.True(t, updated.EndedAt.Valid)
|
||||
require.WithinDuration(t, endedAt, updated.EndedAt.Time, 5*time.Second)
|
||||
|
||||
// Updating first interception again should fail
|
||||
updated, err = db.UpdateAIBridgeInterceptionEnded(ctx, database.UpdateAIBridgeInterceptionEndedParams{
|
||||
ID: intc0.ID,
|
||||
EndedAt: endedAt.Add(time.Hour),
|
||||
})
|
||||
require.ErrorIs(t, err, sql.ErrNoRows)
|
||||
|
||||
// Other interceptions should not have ended_at set
|
||||
for _, intc := range interceptions[1:] {
|
||||
got, err := db.GetAIBridgeInterceptionByID(ctx, intc.ID)
|
||||
require.NoError(t, err)
|
||||
require.False(t, got.EndedAt.Valid)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
+439
-49
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// sqlc v1.30.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -111,6 +111,164 @@ func (q *sqlQuerier) ActivityBumpWorkspace(ctx context.Context, arg ActivityBump
|
||||
return err
|
||||
}
|
||||
|
||||
const calculateAIBridgeInterceptionsTelemetrySummary = `-- name: CalculateAIBridgeInterceptionsTelemetrySummary :one
|
||||
WITH interceptions_in_range AS (
|
||||
-- Get all matching interceptions in the given timeframe.
|
||||
SELECT
|
||||
id,
|
||||
initiator_id,
|
||||
(ended_at - started_at) AS duration
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
WHERE
|
||||
provider = $1::text
|
||||
AND model = $2::text
|
||||
-- TODO: use the client value once we have it (see https://github.com/coder/aibridge/issues/31)
|
||||
AND 'unknown' = $3::text
|
||||
AND ended_at IS NOT NULL -- incomplete interceptions are not included in summaries
|
||||
AND ended_at >= $4::timestamptz
|
||||
AND ended_at < $5::timestamptz
|
||||
),
|
||||
interception_counts AS (
|
||||
SELECT
|
||||
COUNT(id) AS interception_count,
|
||||
COUNT(DISTINCT initiator_id) AS unique_initiator_count
|
||||
FROM
|
||||
interceptions_in_range
|
||||
),
|
||||
duration_percentiles AS (
|
||||
SELECT
|
||||
(COALESCE(PERCENTILE_CONT(0.50) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p50_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.90) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p90_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p95_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p99_millis
|
||||
FROM
|
||||
interceptions_in_range
|
||||
),
|
||||
token_aggregates AS (
|
||||
SELECT
|
||||
COALESCE(SUM(tu.input_tokens), 0) AS token_count_input,
|
||||
COALESCE(SUM(tu.output_tokens), 0) AS token_count_output,
|
||||
-- Cached tokens are stored in metadata JSON, extract if available.
|
||||
-- Read tokens may be stored in:
|
||||
-- - cache_read_input (Anthropic)
|
||||
-- - prompt_cached (OpenAI)
|
||||
COALESCE(SUM(
|
||||
COALESCE((tu.metadata->>'cache_read_input')::bigint, 0) +
|
||||
COALESCE((tu.metadata->>'prompt_cached')::bigint, 0)
|
||||
), 0) AS token_count_cached_read,
|
||||
-- Written tokens may be stored in:
|
||||
-- - cache_creation_input (Anthropic)
|
||||
-- Note that cache_ephemeral_5m_input and cache_ephemeral_1h_input on
|
||||
-- Anthropic are included in the cache_creation_input field.
|
||||
COALESCE(SUM(
|
||||
COALESCE((tu.metadata->>'cache_creation_input')::bigint, 0)
|
||||
), 0) AS token_count_cached_written,
|
||||
COUNT(tu.id) AS token_usages_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_token_usages tu ON i.id = tu.interception_id
|
||||
),
|
||||
prompt_aggregates AS (
|
||||
SELECT
|
||||
COUNT(up.id) AS user_prompts_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_user_prompts up ON i.id = up.interception_id
|
||||
),
|
||||
tool_aggregates AS (
|
||||
SELECT
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = true) AS tool_calls_count_injected,
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = false) AS tool_calls_count_non_injected,
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = true AND tu.invocation_error IS NOT NULL) AS injected_tool_call_error_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_tool_usages tu ON i.id = tu.interception_id
|
||||
)
|
||||
SELECT
|
||||
ic.interception_count::bigint AS interception_count,
|
||||
dp.interception_duration_p50_millis::bigint AS interception_duration_p50_millis,
|
||||
dp.interception_duration_p90_millis::bigint AS interception_duration_p90_millis,
|
||||
dp.interception_duration_p95_millis::bigint AS interception_duration_p95_millis,
|
||||
dp.interception_duration_p99_millis::bigint AS interception_duration_p99_millis,
|
||||
ic.unique_initiator_count::bigint AS unique_initiator_count,
|
||||
pa.user_prompts_count::bigint AS user_prompts_count,
|
||||
tok_agg.token_usages_count::bigint AS token_usages_count,
|
||||
tok_agg.token_count_input::bigint AS token_count_input,
|
||||
tok_agg.token_count_output::bigint AS token_count_output,
|
||||
tok_agg.token_count_cached_read::bigint AS token_count_cached_read,
|
||||
tok_agg.token_count_cached_written::bigint AS token_count_cached_written,
|
||||
tool_agg.tool_calls_count_injected::bigint AS tool_calls_count_injected,
|
||||
tool_agg.tool_calls_count_non_injected::bigint AS tool_calls_count_non_injected,
|
||||
tool_agg.injected_tool_call_error_count::bigint AS injected_tool_call_error_count
|
||||
FROM
|
||||
interception_counts ic,
|
||||
duration_percentiles dp,
|
||||
token_aggregates tok_agg,
|
||||
prompt_aggregates pa,
|
||||
tool_aggregates tool_agg
|
||||
`
|
||||
|
||||
type CalculateAIBridgeInterceptionsTelemetrySummaryParams struct {
|
||||
Provider string `db:"provider" json:"provider"`
|
||||
Model string `db:"model" json:"model"`
|
||||
Client string `db:"client" json:"client"`
|
||||
EndedAtAfter time.Time `db:"ended_at_after" json:"ended_at_after"`
|
||||
EndedAtBefore time.Time `db:"ended_at_before" json:"ended_at_before"`
|
||||
}
|
||||
|
||||
type CalculateAIBridgeInterceptionsTelemetrySummaryRow struct {
|
||||
InterceptionCount int64 `db:"interception_count" json:"interception_count"`
|
||||
InterceptionDurationP50Millis int64 `db:"interception_duration_p50_millis" json:"interception_duration_p50_millis"`
|
||||
InterceptionDurationP90Millis int64 `db:"interception_duration_p90_millis" json:"interception_duration_p90_millis"`
|
||||
InterceptionDurationP95Millis int64 `db:"interception_duration_p95_millis" json:"interception_duration_p95_millis"`
|
||||
InterceptionDurationP99Millis int64 `db:"interception_duration_p99_millis" json:"interception_duration_p99_millis"`
|
||||
UniqueInitiatorCount int64 `db:"unique_initiator_count" json:"unique_initiator_count"`
|
||||
UserPromptsCount int64 `db:"user_prompts_count" json:"user_prompts_count"`
|
||||
TokenUsagesCount int64 `db:"token_usages_count" json:"token_usages_count"`
|
||||
TokenCountInput int64 `db:"token_count_input" json:"token_count_input"`
|
||||
TokenCountOutput int64 `db:"token_count_output" json:"token_count_output"`
|
||||
TokenCountCachedRead int64 `db:"token_count_cached_read" json:"token_count_cached_read"`
|
||||
TokenCountCachedWritten int64 `db:"token_count_cached_written" json:"token_count_cached_written"`
|
||||
ToolCallsCountInjected int64 `db:"tool_calls_count_injected" json:"tool_calls_count_injected"`
|
||||
ToolCallsCountNonInjected int64 `db:"tool_calls_count_non_injected" json:"tool_calls_count_non_injected"`
|
||||
InjectedToolCallErrorCount int64 `db:"injected_tool_call_error_count" json:"injected_tool_call_error_count"`
|
||||
}
|
||||
|
||||
// Calculates the telemetry summary for a given provider, model, and client
|
||||
// combination for telemetry reporting.
|
||||
func (q *sqlQuerier) CalculateAIBridgeInterceptionsTelemetrySummary(ctx context.Context, arg CalculateAIBridgeInterceptionsTelemetrySummaryParams) (CalculateAIBridgeInterceptionsTelemetrySummaryRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, calculateAIBridgeInterceptionsTelemetrySummary,
|
||||
arg.Provider,
|
||||
arg.Model,
|
||||
arg.Client,
|
||||
arg.EndedAtAfter,
|
||||
arg.EndedAtBefore,
|
||||
)
|
||||
var i CalculateAIBridgeInterceptionsTelemetrySummaryRow
|
||||
err := row.Scan(
|
||||
&i.InterceptionCount,
|
||||
&i.InterceptionDurationP50Millis,
|
||||
&i.InterceptionDurationP90Millis,
|
||||
&i.InterceptionDurationP95Millis,
|
||||
&i.InterceptionDurationP99Millis,
|
||||
&i.UniqueInitiatorCount,
|
||||
&i.UserPromptsCount,
|
||||
&i.TokenUsagesCount,
|
||||
&i.TokenCountInput,
|
||||
&i.TokenCountOutput,
|
||||
&i.TokenCountCachedRead,
|
||||
&i.TokenCountCachedWritten,
|
||||
&i.ToolCallsCountInjected,
|
||||
&i.ToolCallsCountNonInjected,
|
||||
&i.InjectedToolCallErrorCount,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const countAIBridgeInterceptions = `-- name: CountAIBridgeInterceptions :one
|
||||
SELECT
|
||||
COUNT(*)
|
||||
@@ -647,6 +805,57 @@ func (q *sqlQuerier) ListAIBridgeInterceptions(ctx context.Context, arg ListAIBr
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const listAIBridgeInterceptionsTelemetrySummaries = `-- name: ListAIBridgeInterceptionsTelemetrySummaries :many
|
||||
SELECT
|
||||
DISTINCT ON (provider, model, client)
|
||||
provider,
|
||||
model,
|
||||
-- TODO: use the client value once we have it (see https://github.com/coder/aibridge/issues/31)
|
||||
'unknown' AS client
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
WHERE
|
||||
ended_at IS NOT NULL -- incomplete interceptions are not included in summaries
|
||||
AND ended_at >= $1::timestamptz
|
||||
AND ended_at < $2::timestamptz
|
||||
`
|
||||
|
||||
type ListAIBridgeInterceptionsTelemetrySummariesParams struct {
|
||||
EndedAtAfter time.Time `db:"ended_at_after" json:"ended_at_after"`
|
||||
EndedAtBefore time.Time `db:"ended_at_before" json:"ended_at_before"`
|
||||
}
|
||||
|
||||
type ListAIBridgeInterceptionsTelemetrySummariesRow struct {
|
||||
Provider string `db:"provider" json:"provider"`
|
||||
Model string `db:"model" json:"model"`
|
||||
Client string `db:"client" json:"client"`
|
||||
}
|
||||
|
||||
// Finds all unique AIBridge interception telemetry summaries combinations
|
||||
// (provider, model, client) in the given timeframe for telemetry reporting.
|
||||
func (q *sqlQuerier) ListAIBridgeInterceptionsTelemetrySummaries(ctx context.Context, arg ListAIBridgeInterceptionsTelemetrySummariesParams) ([]ListAIBridgeInterceptionsTelemetrySummariesRow, error) {
|
||||
rows, err := q.db.QueryContext(ctx, listAIBridgeInterceptionsTelemetrySummaries, arg.EndedAtAfter, arg.EndedAtBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []ListAIBridgeInterceptionsTelemetrySummariesRow
|
||||
for rows.Next() {
|
||||
var i ListAIBridgeInterceptionsTelemetrySummariesRow
|
||||
if err := rows.Scan(&i.Provider, &i.Model, &i.Client); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const listAIBridgeTokenUsagesByInterceptionIDs = `-- name: ListAIBridgeTokenUsagesByInterceptionIDs :many
|
||||
SELECT
|
||||
id, interception_id, provider_response_id, input_tokens, output_tokens, metadata, created_at
|
||||
@@ -778,6 +987,35 @@ func (q *sqlQuerier) ListAIBridgeUserPromptsByInterceptionIDs(ctx context.Contex
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const updateAIBridgeInterceptionEnded = `-- name: UpdateAIBridgeInterceptionEnded :one
|
||||
UPDATE aibridge_interceptions
|
||||
SET ended_at = $1::timestamptz
|
||||
WHERE
|
||||
id = $2::uuid
|
||||
AND ended_at IS NULL
|
||||
RETURNING id, initiator_id, provider, model, started_at, metadata, ended_at
|
||||
`
|
||||
|
||||
type UpdateAIBridgeInterceptionEndedParams struct {
|
||||
EndedAt time.Time `db:"ended_at" json:"ended_at"`
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpdateAIBridgeInterceptionEnded(ctx context.Context, arg UpdateAIBridgeInterceptionEndedParams) (AIBridgeInterception, error) {
|
||||
row := q.db.QueryRowContext(ctx, updateAIBridgeInterceptionEnded, arg.EndedAt, arg.ID)
|
||||
var i AIBridgeInterception
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.InitiatorID,
|
||||
&i.Provider,
|
||||
&i.Model,
|
||||
&i.StartedAt,
|
||||
&i.Metadata,
|
||||
&i.EndedAt,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteAPIKeyByID = `-- name: DeleteAPIKeyByID :exec
|
||||
DELETE FROM
|
||||
api_keys
|
||||
@@ -8047,6 +8285,93 @@ func (q *sqlQuerier) FindMatchingPresetID(ctx context.Context, arg FindMatchingP
|
||||
return template_version_preset_id, err
|
||||
}
|
||||
|
||||
const getOrganizationsWithPrebuildStatus = `-- name: GetOrganizationsWithPrebuildStatus :many
|
||||
WITH orgs_with_prebuilds AS (
|
||||
-- Get unique organizations that have presets with prebuilds configured
|
||||
SELECT DISTINCT o.id, o.name
|
||||
FROM organizations o
|
||||
INNER JOIN templates t ON t.organization_id = o.id
|
||||
INNER JOIN template_versions tv ON tv.template_id = t.id
|
||||
INNER JOIN template_version_presets tvp ON tvp.template_version_id = tv.id
|
||||
WHERE tvp.desired_instances IS NOT NULL
|
||||
),
|
||||
prebuild_user_membership AS (
|
||||
-- Check if the user is a member of the organizations
|
||||
SELECT om.organization_id
|
||||
FROM organization_members om
|
||||
INNER JOIN orgs_with_prebuilds owp ON owp.id = om.organization_id
|
||||
WHERE om.user_id = $1::uuid
|
||||
),
|
||||
prebuild_groups AS (
|
||||
-- Check if the organizations have the prebuilds group
|
||||
SELECT g.organization_id, g.id as group_id
|
||||
FROM groups g
|
||||
INNER JOIN orgs_with_prebuilds owp ON owp.id = g.organization_id
|
||||
WHERE g.name = $2::text
|
||||
),
|
||||
prebuild_group_membership AS (
|
||||
-- Check if the user is in the prebuilds group
|
||||
SELECT pg.organization_id
|
||||
FROM prebuild_groups pg
|
||||
INNER JOIN group_members gm ON gm.group_id = pg.group_id
|
||||
WHERE gm.user_id = $1::uuid
|
||||
)
|
||||
SELECT
|
||||
owp.id AS organization_id,
|
||||
owp.name AS organization_name,
|
||||
(pum.organization_id IS NOT NULL)::boolean AS has_prebuild_user,
|
||||
pg.group_id AS prebuilds_group_id,
|
||||
(pgm.organization_id IS NOT NULL)::boolean AS has_prebuild_user_in_group
|
||||
FROM orgs_with_prebuilds owp
|
||||
LEFT JOIN prebuild_groups pg ON pg.organization_id = owp.id
|
||||
LEFT JOIN prebuild_user_membership pum ON pum.organization_id = owp.id
|
||||
LEFT JOIN prebuild_group_membership pgm ON pgm.organization_id = owp.id
|
||||
`
|
||||
|
||||
type GetOrganizationsWithPrebuildStatusParams struct {
|
||||
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
||||
GroupName string `db:"group_name" json:"group_name"`
|
||||
}
|
||||
|
||||
type GetOrganizationsWithPrebuildStatusRow struct {
|
||||
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
||||
OrganizationName string `db:"organization_name" json:"organization_name"`
|
||||
HasPrebuildUser bool `db:"has_prebuild_user" json:"has_prebuild_user"`
|
||||
PrebuildsGroupID uuid.NullUUID `db:"prebuilds_group_id" json:"prebuilds_group_id"`
|
||||
HasPrebuildUserInGroup bool `db:"has_prebuild_user_in_group" json:"has_prebuild_user_in_group"`
|
||||
}
|
||||
|
||||
// GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
|
||||
// membership status for the prebuilds system user (org membership, group existence, group membership).
|
||||
func (q *sqlQuerier) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg GetOrganizationsWithPrebuildStatusParams) ([]GetOrganizationsWithPrebuildStatusRow, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getOrganizationsWithPrebuildStatus, arg.UserID, arg.GroupName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetOrganizationsWithPrebuildStatusRow
|
||||
for rows.Next() {
|
||||
var i GetOrganizationsWithPrebuildStatusRow
|
||||
if err := rows.Scan(
|
||||
&i.OrganizationID,
|
||||
&i.OrganizationName,
|
||||
&i.HasPrebuildUser,
|
||||
&i.PrebuildsGroupID,
|
||||
&i.HasPrebuildUserInGroup,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getPrebuildMetrics = `-- name: GetPrebuildMetrics :many
|
||||
SELECT
|
||||
t.name as template_name,
|
||||
@@ -8449,12 +8774,8 @@ func (q *sqlQuerier) GetTemplatePresetsWithPrebuilds(ctx context.Context, templa
|
||||
}
|
||||
|
||||
const updatePrebuildProvisionerJobWithCancel = `-- name: UpdatePrebuildProvisionerJobWithCancel :many
|
||||
UPDATE provisioner_jobs
|
||||
SET
|
||||
canceled_at = $1::timestamptz,
|
||||
completed_at = $1::timestamptz
|
||||
WHERE id IN (
|
||||
SELECT pj.id
|
||||
WITH jobs_to_cancel AS (
|
||||
SELECT pj.id, w.id AS workspace_id, w.template_id, wpb.template_version_preset_id
|
||||
FROM provisioner_jobs pj
|
||||
INNER JOIN workspace_prebuild_builds wpb ON wpb.job_id = pj.id
|
||||
INNER JOIN workspaces w ON w.id = wpb.workspace_id
|
||||
@@ -8473,7 +8794,13 @@ WHERE id IN (
|
||||
AND pj.canceled_at IS NULL
|
||||
AND pj.completed_at IS NULL
|
||||
)
|
||||
RETURNING id
|
||||
UPDATE provisioner_jobs
|
||||
SET
|
||||
canceled_at = $1::timestamptz,
|
||||
completed_at = $1::timestamptz
|
||||
FROM jobs_to_cancel
|
||||
WHERE provisioner_jobs.id = jobs_to_cancel.id
|
||||
RETURNING jobs_to_cancel.id, jobs_to_cancel.workspace_id, jobs_to_cancel.template_id, jobs_to_cancel.template_version_preset_id
|
||||
`
|
||||
|
||||
type UpdatePrebuildProvisionerJobWithCancelParams struct {
|
||||
@@ -8481,22 +8808,34 @@ type UpdatePrebuildProvisionerJobWithCancelParams struct {
|
||||
PresetID uuid.NullUUID `db:"preset_id" json:"preset_id"`
|
||||
}
|
||||
|
||||
type UpdatePrebuildProvisionerJobWithCancelRow struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
|
||||
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
||||
TemplateVersionPresetID uuid.NullUUID `db:"template_version_preset_id" json:"template_version_preset_id"`
|
||||
}
|
||||
|
||||
// Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
|
||||
// inactive template version.
|
||||
// This is an optimization to clean up stale pending jobs.
|
||||
func (q *sqlQuerier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
|
||||
func (q *sqlQuerier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]UpdatePrebuildProvisionerJobWithCancelRow, error) {
|
||||
rows, err := q.db.QueryContext(ctx, updatePrebuildProvisionerJobWithCancel, arg.Now, arg.PresetID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []uuid.UUID
|
||||
var items []UpdatePrebuildProvisionerJobWithCancelRow
|
||||
for rows.Next() {
|
||||
var id uuid.UUID
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
var i UpdatePrebuildProvisionerJobWithCancelRow
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.WorkspaceID,
|
||||
&i.TemplateID,
|
||||
&i.TemplateVersionPresetID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, id)
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
@@ -12726,7 +13065,7 @@ func (q *sqlQuerier) DeleteTask(ctx context.Context, arg DeleteTaskParams) (Task
|
||||
}
|
||||
|
||||
const getTaskByID = `-- name: GetTaskByID :one
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id FROM tasks_with_status WHERE id = $1::uuid
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id, owner_username, owner_name, owner_avatar_url FROM tasks_with_status WHERE id = $1::uuid
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetTaskByID(ctx context.Context, id uuid.UUID) (Task, error) {
|
||||
@@ -12747,12 +13086,15 @@ func (q *sqlQuerier) GetTaskByID(ctx context.Context, id uuid.UUID) (Task, error
|
||||
&i.WorkspaceBuildNumber,
|
||||
&i.WorkspaceAgentID,
|
||||
&i.WorkspaceAppID,
|
||||
&i.OwnerUsername,
|
||||
&i.OwnerName,
|
||||
&i.OwnerAvatarUrl,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTaskByWorkspaceID = `-- name: GetTaskByWorkspaceID :one
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id FROM tasks_with_status WHERE workspace_id = $1::uuid
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id, owner_username, owner_name, owner_avatar_url FROM tasks_with_status WHERE workspace_id = $1::uuid
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (Task, error) {
|
||||
@@ -12773,6 +13115,9 @@ func (q *sqlQuerier) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.
|
||||
&i.WorkspaceBuildNumber,
|
||||
&i.WorkspaceAgentID,
|
||||
&i.WorkspaceAppID,
|
||||
&i.OwnerUsername,
|
||||
&i.OwnerName,
|
||||
&i.OwnerAvatarUrl,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -12781,11 +13126,12 @@ const insertTask = `-- name: InsertTask :one
|
||||
INSERT INTO tasks
|
||||
(id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at)
|
||||
VALUES
|
||||
(gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8)
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
RETURNING id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at
|
||||
`
|
||||
|
||||
type InsertTaskParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
||||
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
|
||||
Name string `db:"name" json:"name"`
|
||||
@@ -12798,6 +13144,7 @@ type InsertTaskParams struct {
|
||||
|
||||
func (q *sqlQuerier) InsertTask(ctx context.Context, arg InsertTaskParams) (TaskTable, error) {
|
||||
row := q.db.QueryRowContext(ctx, insertTask,
|
||||
arg.ID,
|
||||
arg.OrganizationID,
|
||||
arg.OwnerID,
|
||||
arg.Name,
|
||||
@@ -12824,7 +13171,7 @@ func (q *sqlQuerier) InsertTask(ctx context.Context, arg InsertTaskParams) (Task
|
||||
}
|
||||
|
||||
const listTasks = `-- name: ListTasks :many
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id FROM tasks_with_status tws
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id, owner_username, owner_name, owner_avatar_url FROM tasks_with_status tws
|
||||
WHERE tws.deleted_at IS NULL
|
||||
AND CASE WHEN $1::UUID != '00000000-0000-0000-0000-000000000000' THEN tws.owner_id = $1::UUID ELSE TRUE END
|
||||
AND CASE WHEN $2::UUID != '00000000-0000-0000-0000-000000000000' THEN tws.organization_id = $2::UUID ELSE TRUE END
|
||||
@@ -12862,6 +13209,9 @@ func (q *sqlQuerier) ListTasks(ctx context.Context, arg ListTasksParams) ([]Task
|
||||
&i.WorkspaceBuildNumber,
|
||||
&i.WorkspaceAgentID,
|
||||
&i.WorkspaceAppID,
|
||||
&i.OwnerUsername,
|
||||
&i.OwnerName,
|
||||
&i.OwnerAvatarUrl,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -13035,6 +13385,41 @@ func (q *sqlQuerier) UpsertTelemetryItem(ctx context.Context, arg UpsertTelemetr
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteOldTelemetryLocks = `-- name: DeleteOldTelemetryLocks :exec
|
||||
DELETE FROM
|
||||
telemetry_locks
|
||||
WHERE
|
||||
period_ending_at < $1::timestamptz
|
||||
`
|
||||
|
||||
// Deletes old telemetry locks from the telemetry_locks table.
|
||||
func (q *sqlQuerier) DeleteOldTelemetryLocks(ctx context.Context, periodEndingAtBefore time.Time) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteOldTelemetryLocks, periodEndingAtBefore)
|
||||
return err
|
||||
}
|
||||
|
||||
const insertTelemetryLock = `-- name: InsertTelemetryLock :exec
|
||||
INSERT INTO
|
||||
telemetry_locks (event_type, period_ending_at)
|
||||
VALUES
|
||||
($1, $2)
|
||||
`
|
||||
|
||||
type InsertTelemetryLockParams struct {
|
||||
EventType string `db:"event_type" json:"event_type"`
|
||||
PeriodEndingAt time.Time `db:"period_ending_at" json:"period_ending_at"`
|
||||
}
|
||||
|
||||
// Inserts a new lock row into the telemetry_locks table. Replicas should call
|
||||
// this function prior to attempting to generate or publish a heartbeat event to
|
||||
// the telemetry service.
|
||||
// If the query returns a duplicate primary key error, the replica should not
|
||||
// attempt to generate or publish the event to the telemetry service.
|
||||
func (q *sqlQuerier) InsertTelemetryLock(ctx context.Context, arg InsertTelemetryLockParams) error {
|
||||
_, err := q.db.ExecContext(ctx, insertTelemetryLock, arg.EventType, arg.PeriodEndingAt)
|
||||
return err
|
||||
}
|
||||
|
||||
const getTemplateAverageBuildTime = `-- name: GetTemplateAverageBuildTime :one
|
||||
WITH build_times AS (
|
||||
SELECT
|
||||
@@ -17184,7 +17569,8 @@ const getWorkspaceAgentAndLatestBuildByAuthToken = `-- name: GetWorkspaceAgentAn
|
||||
SELECT
|
||||
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl,
|
||||
workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted,
|
||||
workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.ai_task_sidebar_app_id, workspace_build_with_user.has_external_agent, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name
|
||||
workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.ai_task_sidebar_app_id, workspace_build_with_user.has_external_agent, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name,
|
||||
tasks.id AS task_id
|
||||
FROM
|
||||
workspace_agents
|
||||
JOIN
|
||||
@@ -17199,6 +17585,10 @@ JOIN
|
||||
workspaces
|
||||
ON
|
||||
workspace_build_with_user.workspace_id = workspaces.id
|
||||
LEFT JOIN
|
||||
tasks
|
||||
ON
|
||||
tasks.workspace_id = workspaces.id
|
||||
WHERE
|
||||
-- This should only match 1 agent, so 1 returned row or 0.
|
||||
workspace_agents.auth_token = $1::uuid
|
||||
@@ -17222,6 +17612,7 @@ type GetWorkspaceAgentAndLatestBuildByAuthTokenRow struct {
|
||||
WorkspaceTable WorkspaceTable `db:"workspace_table" json:"workspace_table"`
|
||||
WorkspaceAgent WorkspaceAgent `db:"workspace_agent" json:"workspace_agent"`
|
||||
WorkspaceBuild WorkspaceBuild `db:"workspace_build" json:"workspace_build"`
|
||||
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Context, authToken uuid.UUID) (GetWorkspaceAgentAndLatestBuildByAuthTokenRow, error) {
|
||||
@@ -17301,6 +17692,7 @@ func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Cont
|
||||
&i.WorkspaceBuild.InitiatorByAvatarUrl,
|
||||
&i.WorkspaceBuild.InitiatorByUsername,
|
||||
&i.WorkspaceBuild.InitiatorByName,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -21542,7 +21934,7 @@ func (q *sqlQuerier) GetWorkspaceACLByID(ctx context.Context, id uuid.UUID) (Get
|
||||
|
||||
const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
WHERE
|
||||
@@ -21603,13 +21995,14 @@ func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUI
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceByID = `-- name: GetWorkspaceByID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
|
||||
FROM
|
||||
workspaces_expanded
|
||||
WHERE
|
||||
@@ -21651,13 +22044,14 @@ func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Worksp
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceByOwnerIDAndName = `-- name: GetWorkspaceByOwnerIDAndName :one
|
||||
SELECT
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
WHERE
|
||||
@@ -21706,13 +22100,14 @@ func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWo
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceByResourceID = `-- name: GetWorkspaceByResourceID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
WHERE
|
||||
@@ -21768,13 +22163,14 @@ func (q *sqlQuerier) GetWorkspaceByResourceID(ctx context.Context, resourceID uu
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceByWorkspaceAppID = `-- name: GetWorkspaceByWorkspaceAppID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
|
||||
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
WHERE
|
||||
@@ -21842,6 +22238,7 @@ func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspace
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -21891,7 +22288,7 @@ SELECT
|
||||
),
|
||||
filtered_workspaces AS (
|
||||
SELECT
|
||||
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description,
|
||||
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description, workspaces.task_id,
|
||||
latest_build.template_version_id,
|
||||
latest_build.template_version_name,
|
||||
latest_build.completed_at as latest_build_completed_at,
|
||||
@@ -21899,7 +22296,6 @@ SELECT
|
||||
latest_build.error as latest_build_error,
|
||||
latest_build.transition as latest_build_transition,
|
||||
latest_build.job_status as latest_build_status,
|
||||
latest_build.has_ai_task as latest_build_has_ai_task,
|
||||
latest_build.has_external_agent as latest_build_has_external_agent
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
@@ -22133,25 +22529,19 @@ WHERE
|
||||
(latest_build.template_version_id = template.active_version_id) = $18 :: boolean
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_ai_task in latest build
|
||||
-- Filter by has_ai_task, checks if this is a task workspace.
|
||||
AND CASE
|
||||
WHEN $19 :: boolean IS NOT NULL THEN
|
||||
(COALESCE(latest_build.has_ai_task, false) OR (
|
||||
-- If the build has no AI task, it means that the provisioner job is in progress
|
||||
-- and we don't know if it has an AI task yet. In this case, we optimistically
|
||||
-- assume that it has an AI task if the AI Prompt parameter is not empty. This
|
||||
-- lets the AI Task frontend spawn a task and see it immediately after instead of
|
||||
-- having to wait for the build to complete.
|
||||
latest_build.has_ai_task IS NULL AND
|
||||
latest_build.completed_at IS NULL AND
|
||||
EXISTS (
|
||||
SELECT 1
|
||||
FROM workspace_build_parameters
|
||||
WHERE workspace_build_parameters.workspace_build_id = latest_build.id
|
||||
AND workspace_build_parameters.name = 'AI Prompt'
|
||||
AND workspace_build_parameters.value != ''
|
||||
)
|
||||
)) = ($19 :: boolean)
|
||||
WHEN $19::boolean IS NOT NULL
|
||||
THEN $19::boolean = EXISTS (
|
||||
SELECT
|
||||
1
|
||||
FROM
|
||||
tasks
|
||||
WHERE
|
||||
-- Consider all tasks, deleting a task does not turn the
|
||||
-- workspace into a non-task workspace.
|
||||
tasks.workspace_id = workspaces.id
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_external_agent in latest build
|
||||
@@ -22182,7 +22572,7 @@ WHERE
|
||||
-- @authorize_filter
|
||||
), filtered_workspaces_order AS (
|
||||
SELECT
|
||||
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task, fw.latest_build_has_external_agent
|
||||
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.task_id, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_external_agent
|
||||
FROM
|
||||
filtered_workspaces fw
|
||||
ORDER BY
|
||||
@@ -22203,7 +22593,7 @@ WHERE
|
||||
$25
|
||||
), filtered_workspaces_order_with_summary AS (
|
||||
SELECT
|
||||
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task, fwo.latest_build_has_external_agent
|
||||
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.task_id, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_external_agent
|
||||
FROM
|
||||
filtered_workspaces_order fwo
|
||||
-- Return a technical summary row with total count of workspaces.
|
||||
@@ -22239,6 +22629,7 @@ WHERE
|
||||
'', -- template_display_name
|
||||
'', -- template_icon
|
||||
'', -- template_description
|
||||
'00000000-0000-0000-0000-000000000000'::uuid, -- task_id
|
||||
-- Extra columns added to ` + "`" + `filtered_workspaces` + "`" + `
|
||||
'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
|
||||
'', -- template_version_name
|
||||
@@ -22247,7 +22638,6 @@ WHERE
|
||||
'', -- latest_build_error
|
||||
'start'::workspace_transition, -- latest_build_transition
|
||||
'unknown'::provisioner_job_status, -- latest_build_status
|
||||
false, -- latest_build_has_ai_task
|
||||
false -- latest_build_has_external_agent
|
||||
WHERE
|
||||
$27 :: boolean = true
|
||||
@@ -22258,7 +22648,7 @@ WHERE
|
||||
filtered_workspaces
|
||||
)
|
||||
SELECT
|
||||
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, fwos.latest_build_has_external_agent,
|
||||
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.task_id, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_external_agent,
|
||||
tc.count
|
||||
FROM
|
||||
filtered_workspaces_order_with_summary fwos
|
||||
@@ -22326,6 +22716,7 @@ type GetWorkspacesRow struct {
|
||||
TemplateDisplayName string `db:"template_display_name" json:"template_display_name"`
|
||||
TemplateIcon string `db:"template_icon" json:"template_icon"`
|
||||
TemplateDescription string `db:"template_description" json:"template_description"`
|
||||
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
|
||||
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
||||
TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"`
|
||||
LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"`
|
||||
@@ -22333,7 +22724,6 @@ type GetWorkspacesRow struct {
|
||||
LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"`
|
||||
LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"`
|
||||
LatestBuildStatus ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"`
|
||||
LatestBuildHasAITask sql.NullBool `db:"latest_build_has_ai_task" json:"latest_build_has_ai_task"`
|
||||
LatestBuildHasExternalAgent sql.NullBool `db:"latest_build_has_external_agent" json:"latest_build_has_external_agent"`
|
||||
Count int64 `db:"count" json:"count"`
|
||||
}
|
||||
@@ -22408,6 +22798,7 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams)
|
||||
&i.TemplateDisplayName,
|
||||
&i.TemplateIcon,
|
||||
&i.TemplateDescription,
|
||||
&i.TaskID,
|
||||
&i.TemplateVersionID,
|
||||
&i.TemplateVersionName,
|
||||
&i.LatestBuildCompletedAt,
|
||||
@@ -22415,7 +22806,6 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams)
|
||||
&i.LatestBuildError,
|
||||
&i.LatestBuildTransition,
|
||||
&i.LatestBuildStatus,
|
||||
&i.LatestBuildHasAITask,
|
||||
&i.LatestBuildHasExternalAgent,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
|
||||
@@ -6,6 +6,14 @@ INSERT INTO aibridge_interceptions (
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: UpdateAIBridgeInterceptionEnded :one
|
||||
UPDATE aibridge_interceptions
|
||||
SET ended_at = @ended_at::timestamptz
|
||||
WHERE
|
||||
id = @id::uuid
|
||||
AND ended_at IS NULL
|
||||
RETURNING *;
|
||||
|
||||
-- name: InsertAIBridgeTokenUsage :one
|
||||
INSERT INTO aibridge_token_usages (
|
||||
id, interception_id, provider_response_id, input_tokens, output_tokens, metadata, created_at
|
||||
@@ -199,3 +207,122 @@ WHERE
|
||||
ORDER BY
|
||||
created_at ASC,
|
||||
id ASC;
|
||||
|
||||
-- name: ListAIBridgeInterceptionsTelemetrySummaries :many
|
||||
-- Finds all unique AIBridge interception telemetry summaries combinations
|
||||
-- (provider, model, client) in the given timeframe for telemetry reporting.
|
||||
SELECT
|
||||
DISTINCT ON (provider, model, client)
|
||||
provider,
|
||||
model,
|
||||
-- TODO: use the client value once we have it (see https://github.com/coder/aibridge/issues/31)
|
||||
'unknown' AS client
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
WHERE
|
||||
ended_at IS NOT NULL -- incomplete interceptions are not included in summaries
|
||||
AND ended_at >= @ended_at_after::timestamptz
|
||||
AND ended_at < @ended_at_before::timestamptz;
|
||||
|
||||
-- name: CalculateAIBridgeInterceptionsTelemetrySummary :one
|
||||
-- Calculates the telemetry summary for a given provider, model, and client
|
||||
-- combination for telemetry reporting.
|
||||
WITH interceptions_in_range AS (
|
||||
-- Get all matching interceptions in the given timeframe.
|
||||
SELECT
|
||||
id,
|
||||
initiator_id,
|
||||
(ended_at - started_at) AS duration
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
WHERE
|
||||
provider = @provider::text
|
||||
AND model = @model::text
|
||||
-- TODO: use the client value once we have it (see https://github.com/coder/aibridge/issues/31)
|
||||
AND 'unknown' = @client::text
|
||||
AND ended_at IS NOT NULL -- incomplete interceptions are not included in summaries
|
||||
AND ended_at >= @ended_at_after::timestamptz
|
||||
AND ended_at < @ended_at_before::timestamptz
|
||||
),
|
||||
interception_counts AS (
|
||||
SELECT
|
||||
COUNT(id) AS interception_count,
|
||||
COUNT(DISTINCT initiator_id) AS unique_initiator_count
|
||||
FROM
|
||||
interceptions_in_range
|
||||
),
|
||||
duration_percentiles AS (
|
||||
SELECT
|
||||
(COALESCE(PERCENTILE_CONT(0.50) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p50_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.90) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p90_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p95_millis,
|
||||
(COALESCE(PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM duration)), 0) * 1000)::bigint AS interception_duration_p99_millis
|
||||
FROM
|
||||
interceptions_in_range
|
||||
),
|
||||
token_aggregates AS (
|
||||
SELECT
|
||||
COALESCE(SUM(tu.input_tokens), 0) AS token_count_input,
|
||||
COALESCE(SUM(tu.output_tokens), 0) AS token_count_output,
|
||||
-- Cached tokens are stored in metadata JSON, extract if available.
|
||||
-- Read tokens may be stored in:
|
||||
-- - cache_read_input (Anthropic)
|
||||
-- - prompt_cached (OpenAI)
|
||||
COALESCE(SUM(
|
||||
COALESCE((tu.metadata->>'cache_read_input')::bigint, 0) +
|
||||
COALESCE((tu.metadata->>'prompt_cached')::bigint, 0)
|
||||
), 0) AS token_count_cached_read,
|
||||
-- Written tokens may be stored in:
|
||||
-- - cache_creation_input (Anthropic)
|
||||
-- Note that cache_ephemeral_5m_input and cache_ephemeral_1h_input on
|
||||
-- Anthropic are included in the cache_creation_input field.
|
||||
COALESCE(SUM(
|
||||
COALESCE((tu.metadata->>'cache_creation_input')::bigint, 0)
|
||||
), 0) AS token_count_cached_written,
|
||||
COUNT(tu.id) AS token_usages_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_token_usages tu ON i.id = tu.interception_id
|
||||
),
|
||||
prompt_aggregates AS (
|
||||
SELECT
|
||||
COUNT(up.id) AS user_prompts_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_user_prompts up ON i.id = up.interception_id
|
||||
),
|
||||
tool_aggregates AS (
|
||||
SELECT
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = true) AS tool_calls_count_injected,
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = false) AS tool_calls_count_non_injected,
|
||||
COUNT(tu.id) FILTER (WHERE tu.injected = true AND tu.invocation_error IS NOT NULL) AS injected_tool_call_error_count
|
||||
FROM
|
||||
interceptions_in_range i
|
||||
LEFT JOIN
|
||||
aibridge_tool_usages tu ON i.id = tu.interception_id
|
||||
)
|
||||
SELECT
|
||||
ic.interception_count::bigint AS interception_count,
|
||||
dp.interception_duration_p50_millis::bigint AS interception_duration_p50_millis,
|
||||
dp.interception_duration_p90_millis::bigint AS interception_duration_p90_millis,
|
||||
dp.interception_duration_p95_millis::bigint AS interception_duration_p95_millis,
|
||||
dp.interception_duration_p99_millis::bigint AS interception_duration_p99_millis,
|
||||
ic.unique_initiator_count::bigint AS unique_initiator_count,
|
||||
pa.user_prompts_count::bigint AS user_prompts_count,
|
||||
tok_agg.token_usages_count::bigint AS token_usages_count,
|
||||
tok_agg.token_count_input::bigint AS token_count_input,
|
||||
tok_agg.token_count_output::bigint AS token_count_output,
|
||||
tok_agg.token_count_cached_read::bigint AS token_count_cached_read,
|
||||
tok_agg.token_count_cached_written::bigint AS token_count_cached_written,
|
||||
tool_agg.tool_calls_count_injected::bigint AS tool_calls_count_injected,
|
||||
tool_agg.tool_calls_count_non_injected::bigint AS tool_calls_count_non_injected,
|
||||
tool_agg.injected_tool_call_error_count::bigint AS injected_tool_call_error_count
|
||||
FROM
|
||||
interception_counts ic,
|
||||
duration_percentiles dp,
|
||||
token_aggregates tok_agg,
|
||||
prompt_aggregates pa,
|
||||
tool_aggregates tool_agg
|
||||
;
|
||||
|
||||
@@ -300,12 +300,8 @@ GROUP BY wpb.template_version_preset_id;
|
||||
-- Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
|
||||
-- inactive template version.
|
||||
-- This is an optimization to clean up stale pending jobs.
|
||||
UPDATE provisioner_jobs
|
||||
SET
|
||||
canceled_at = @now::timestamptz,
|
||||
completed_at = @now::timestamptz
|
||||
WHERE id IN (
|
||||
SELECT pj.id
|
||||
WITH jobs_to_cancel AS (
|
||||
SELECT pj.id, w.id AS workspace_id, w.template_id, wpb.template_version_preset_id
|
||||
FROM provisioner_jobs pj
|
||||
INNER JOIN workspace_prebuild_builds wpb ON wpb.job_id = pj.id
|
||||
INNER JOIN workspaces w ON w.id = wpb.workspace_id
|
||||
@@ -324,4 +320,54 @@ WHERE id IN (
|
||||
AND pj.canceled_at IS NULL
|
||||
AND pj.completed_at IS NULL
|
||||
)
|
||||
RETURNING id;
|
||||
UPDATE provisioner_jobs
|
||||
SET
|
||||
canceled_at = @now::timestamptz,
|
||||
completed_at = @now::timestamptz
|
||||
FROM jobs_to_cancel
|
||||
WHERE provisioner_jobs.id = jobs_to_cancel.id
|
||||
RETURNING jobs_to_cancel.id, jobs_to_cancel.workspace_id, jobs_to_cancel.template_id, jobs_to_cancel.template_version_preset_id;
|
||||
|
||||
-- name: GetOrganizationsWithPrebuildStatus :many
|
||||
-- GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
|
||||
-- membership status for the prebuilds system user (org membership, group existence, group membership).
|
||||
WITH orgs_with_prebuilds AS (
|
||||
-- Get unique organizations that have presets with prebuilds configured
|
||||
SELECT DISTINCT o.id, o.name
|
||||
FROM organizations o
|
||||
INNER JOIN templates t ON t.organization_id = o.id
|
||||
INNER JOIN template_versions tv ON tv.template_id = t.id
|
||||
INNER JOIN template_version_presets tvp ON tvp.template_version_id = tv.id
|
||||
WHERE tvp.desired_instances IS NOT NULL
|
||||
),
|
||||
prebuild_user_membership AS (
|
||||
-- Check if the user is a member of the organizations
|
||||
SELECT om.organization_id
|
||||
FROM organization_members om
|
||||
INNER JOIN orgs_with_prebuilds owp ON owp.id = om.organization_id
|
||||
WHERE om.user_id = @user_id::uuid
|
||||
),
|
||||
prebuild_groups AS (
|
||||
-- Check if the organizations have the prebuilds group
|
||||
SELECT g.organization_id, g.id as group_id
|
||||
FROM groups g
|
||||
INNER JOIN orgs_with_prebuilds owp ON owp.id = g.organization_id
|
||||
WHERE g.name = @group_name::text
|
||||
),
|
||||
prebuild_group_membership AS (
|
||||
-- Check if the user is in the prebuilds group
|
||||
SELECT pg.organization_id
|
||||
FROM prebuild_groups pg
|
||||
INNER JOIN group_members gm ON gm.group_id = pg.group_id
|
||||
WHERE gm.user_id = @user_id::uuid
|
||||
)
|
||||
SELECT
|
||||
owp.id AS organization_id,
|
||||
owp.name AS organization_name,
|
||||
(pum.organization_id IS NOT NULL)::boolean AS has_prebuild_user,
|
||||
pg.group_id AS prebuilds_group_id,
|
||||
(pgm.organization_id IS NOT NULL)::boolean AS has_prebuild_user_in_group
|
||||
FROM orgs_with_prebuilds owp
|
||||
LEFT JOIN prebuild_groups pg ON pg.organization_id = owp.id
|
||||
LEFT JOIN prebuild_user_membership pum ON pum.organization_id = owp.id
|
||||
LEFT JOIN prebuild_group_membership pgm ON pgm.organization_id = owp.id;
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
INSERT INTO tasks
|
||||
(id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at)
|
||||
VALUES
|
||||
(gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8)
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
RETURNING *;
|
||||
|
||||
-- name: UpdateTaskWorkspaceID :one
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
-- name: InsertTelemetryLock :exec
|
||||
-- Inserts a new lock row into the telemetry_locks table. Replicas should call
|
||||
-- this function prior to attempting to generate or publish a heartbeat event to
|
||||
-- the telemetry service.
|
||||
-- If the query returns a duplicate primary key error, the replica should not
|
||||
-- attempt to generate or publish the event to the telemetry service.
|
||||
INSERT INTO
|
||||
telemetry_locks (event_type, period_ending_at)
|
||||
VALUES
|
||||
($1, $2);
|
||||
|
||||
-- name: DeleteOldTelemetryLocks :exec
|
||||
-- Deletes old telemetry locks from the telemetry_locks table.
|
||||
DELETE FROM
|
||||
telemetry_locks
|
||||
WHERE
|
||||
period_ending_at < @period_ending_at_before::timestamptz;
|
||||
@@ -285,7 +285,8 @@ WHERE
|
||||
SELECT
|
||||
sqlc.embed(workspaces),
|
||||
sqlc.embed(workspace_agents),
|
||||
sqlc.embed(workspace_build_with_user)
|
||||
sqlc.embed(workspace_build_with_user),
|
||||
tasks.id AS task_id
|
||||
FROM
|
||||
workspace_agents
|
||||
JOIN
|
||||
@@ -300,6 +301,10 @@ JOIN
|
||||
workspaces
|
||||
ON
|
||||
workspace_build_with_user.workspace_id = workspaces.id
|
||||
LEFT JOIN
|
||||
tasks
|
||||
ON
|
||||
tasks.workspace_id = workspaces.id
|
||||
WHERE
|
||||
-- This should only match 1 agent, so 1 returned row or 0.
|
||||
workspace_agents.auth_token = @auth_token::uuid
|
||||
|
||||
@@ -117,7 +117,6 @@ SELECT
|
||||
latest_build.error as latest_build_error,
|
||||
latest_build.transition as latest_build_transition,
|
||||
latest_build.job_status as latest_build_status,
|
||||
latest_build.has_ai_task as latest_build_has_ai_task,
|
||||
latest_build.has_external_agent as latest_build_has_external_agent
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
@@ -351,25 +350,19 @@ WHERE
|
||||
(latest_build.template_version_id = template.active_version_id) = sqlc.narg('using_active') :: boolean
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_ai_task in latest build
|
||||
-- Filter by has_ai_task, checks if this is a task workspace.
|
||||
AND CASE
|
||||
WHEN sqlc.narg('has_ai_task') :: boolean IS NOT NULL THEN
|
||||
(COALESCE(latest_build.has_ai_task, false) OR (
|
||||
-- If the build has no AI task, it means that the provisioner job is in progress
|
||||
-- and we don't know if it has an AI task yet. In this case, we optimistically
|
||||
-- assume that it has an AI task if the AI Prompt parameter is not empty. This
|
||||
-- lets the AI Task frontend spawn a task and see it immediately after instead of
|
||||
-- having to wait for the build to complete.
|
||||
latest_build.has_ai_task IS NULL AND
|
||||
latest_build.completed_at IS NULL AND
|
||||
EXISTS (
|
||||
SELECT 1
|
||||
FROM workspace_build_parameters
|
||||
WHERE workspace_build_parameters.workspace_build_id = latest_build.id
|
||||
AND workspace_build_parameters.name = 'AI Prompt'
|
||||
AND workspace_build_parameters.value != ''
|
||||
)
|
||||
)) = (sqlc.narg('has_ai_task') :: boolean)
|
||||
WHEN sqlc.narg('has_ai_task')::boolean IS NOT NULL
|
||||
THEN sqlc.narg('has_ai_task')::boolean = EXISTS (
|
||||
SELECT
|
||||
1
|
||||
FROM
|
||||
tasks
|
||||
WHERE
|
||||
-- Consider all tasks, deleting a task does not turn the
|
||||
-- workspace into a non-task workspace.
|
||||
tasks.workspace_id = workspaces.id
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_external_agent in latest build
|
||||
@@ -457,6 +450,7 @@ WHERE
|
||||
'', -- template_display_name
|
||||
'', -- template_icon
|
||||
'', -- template_description
|
||||
'00000000-0000-0000-0000-000000000000'::uuid, -- task_id
|
||||
-- Extra columns added to `filtered_workspaces`
|
||||
'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
|
||||
'', -- template_version_name
|
||||
@@ -465,7 +459,6 @@ WHERE
|
||||
'', -- latest_build_error
|
||||
'start'::workspace_transition, -- latest_build_transition
|
||||
'unknown'::provisioner_job_status, -- latest_build_status
|
||||
false, -- latest_build_has_ai_task
|
||||
false -- latest_build_has_external_agent
|
||||
WHERE
|
||||
@with_summary :: boolean = true
|
||||
|
||||
@@ -62,6 +62,7 @@ const (
|
||||
UniqueTaskWorkspaceAppsPkey UniqueConstraint = "task_workspace_apps_pkey" // ALTER TABLE ONLY task_workspace_apps ADD CONSTRAINT task_workspace_apps_pkey PRIMARY KEY (task_id, workspace_build_number);
|
||||
UniqueTasksPkey UniqueConstraint = "tasks_pkey" // ALTER TABLE ONLY tasks ADD CONSTRAINT tasks_pkey PRIMARY KEY (id);
|
||||
UniqueTelemetryItemsPkey UniqueConstraint = "telemetry_items_pkey" // ALTER TABLE ONLY telemetry_items ADD CONSTRAINT telemetry_items_pkey PRIMARY KEY (key);
|
||||
UniqueTelemetryLocksPkey UniqueConstraint = "telemetry_locks_pkey" // ALTER TABLE ONLY telemetry_locks ADD CONSTRAINT telemetry_locks_pkey PRIMARY KEY (event_type, period_ending_at);
|
||||
UniqueTemplateUsageStatsPkey UniqueConstraint = "template_usage_stats_pkey" // ALTER TABLE ONLY template_usage_stats ADD CONSTRAINT template_usage_stats_pkey PRIMARY KEY (start_time, template_id, user_id);
|
||||
UniqueTemplateVersionParametersTemplateVersionIDNameKey UniqueConstraint = "template_version_parameters_template_version_id_name_key" // ALTER TABLE ONLY template_version_parameters ADD CONSTRAINT template_version_parameters_template_version_id_name_key UNIQUE (template_version_id, name);
|
||||
UniqueTemplateVersionPresetParametersPkey UniqueConstraint = "template_version_preset_parameters_pkey" // ALTER TABLE ONLY template_version_preset_parameters ADD CONSTRAINT template_version_preset_parameters_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -118,6 +118,7 @@ func ExtractWorkspaceAgentAndLatestBuild(opts ExtractWorkspaceAgentAndLatestBuil
|
||||
OwnerID: row.WorkspaceTable.OwnerID,
|
||||
TemplateID: row.WorkspaceTable.TemplateID,
|
||||
VersionID: row.WorkspaceBuild.TemplateVersionID,
|
||||
TaskID: row.TaskID,
|
||||
BlockUserData: row.WorkspaceAgent.APIKeyScope == database.AgentKeyScopeEnumNoUserData,
|
||||
}),
|
||||
)
|
||||
|
||||
@@ -18,6 +18,7 @@ func GetAuthorizationServerMetadata(accessURL *url.URL) http.HandlerFunc {
|
||||
AuthorizationEndpoint: accessURL.JoinPath("/oauth2/authorize").String(),
|
||||
TokenEndpoint: accessURL.JoinPath("/oauth2/tokens").String(),
|
||||
RegistrationEndpoint: accessURL.JoinPath("/oauth2/register").String(), // RFC 7591
|
||||
RevocationEndpoint: accessURL.JoinPath("/oauth2/revoke").String(), // RFC 7009
|
||||
ResponseTypesSupported: []string{"code"},
|
||||
GrantTypesSupported: []string{"authorization_code", "refresh_token"},
|
||||
CodeChallengeMethodsSupported: []string{"S256"},
|
||||
|
||||
@@ -37,13 +37,18 @@ type ReconciliationOrchestrator interface {
|
||||
TrackResourceReplacement(ctx context.Context, workspaceID, buildID uuid.UUID, replacements []*sdkproto.ResourceReplacement)
|
||||
}
|
||||
|
||||
// ReconcileStats contains statistics about a reconciliation cycle.
|
||||
type ReconcileStats struct {
|
||||
Elapsed time.Duration
|
||||
}
|
||||
|
||||
type Reconciler interface {
|
||||
StateSnapshotter
|
||||
|
||||
// ReconcileAll orchestrates the reconciliation of all prebuilds across all templates.
|
||||
// It takes a global snapshot of the system state and then reconciles each preset
|
||||
// in parallel, creating or deleting prebuilds as needed to reach their desired states.
|
||||
ReconcileAll(ctx context.Context) error
|
||||
ReconcileAll(ctx context.Context) (ReconcileStats, error)
|
||||
}
|
||||
|
||||
// StateSnapshotter defines the operations necessary to capture workspace prebuilds state.
|
||||
|
||||
@@ -17,7 +17,11 @@ func (NoopReconciler) Run(context.Context) {}
|
||||
func (NoopReconciler) Stop(context.Context, error) {}
|
||||
func (NoopReconciler) TrackResourceReplacement(context.Context, uuid.UUID, uuid.UUID, []*sdkproto.ResourceReplacement) {
|
||||
}
|
||||
func (NoopReconciler) ReconcileAll(context.Context) error { return nil }
|
||||
|
||||
func (NoopReconciler) ReconcileAll(context.Context) (ReconcileStats, error) {
|
||||
return ReconcileStats{}, nil
|
||||
}
|
||||
|
||||
func (NoopReconciler) SnapshotState(context.Context, database.Store) (*GlobalSnapshot, error) {
|
||||
return &GlobalSnapshot{}, nil
|
||||
}
|
||||
|
||||
@@ -2278,6 +2278,14 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
if err != nil {
|
||||
return xerrors.Errorf("update workspace deleted: %w", err)
|
||||
}
|
||||
if workspace.TaskID.Valid {
|
||||
if _, err := db.DeleteTask(ctx, database.DeleteTaskParams{
|
||||
ID: workspace.TaskID.UUID,
|
||||
DeletedAt: dbtime.Now(),
|
||||
}); err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return xerrors.Errorf("delete task related to workspace: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}, nil)
|
||||
|
||||
@@ -0,0 +1,104 @@
|
||||
# Rego authorization policy
|
||||
|
||||
## Code style
|
||||
|
||||
It's a good idea to consult the [Rego style guide](https://docs.styra.com/opa/rego-style-guide). The "Variables and Data Types" section in particular has some helpful and non-obvious advice in it.
|
||||
|
||||
## Debugging
|
||||
|
||||
Open Policy Agent provides a CLI and a playground that can be used for evaluating, formatting, testing, and linting policies.
|
||||
|
||||
### CLI
|
||||
|
||||
Below are some helpful commands you can use for debugging.
|
||||
|
||||
For full evaluation, run:
|
||||
|
||||
```sh
|
||||
opa eval --format=pretty 'data.authz.allow' -d policy.rego -i input.json
|
||||
```
|
||||
|
||||
For partial evaluation, run:
|
||||
|
||||
```sh
|
||||
opa eval --partial --format=pretty 'data.authz.allow' -d policy.rego \
|
||||
--unknowns input.object.owner --unknowns input.object.org_owner \
|
||||
--unknowns input.object.acl_user_list --unknowns input.object.acl_group_list \
|
||||
-i input.json
|
||||
```
|
||||
|
||||
### Playground
|
||||
|
||||
Use the [Open Policy Agent Playground](https://play.openpolicyagent.org/) while editing to get linting, code formatting, and debugging help!
|
||||
|
||||
You can use the contents of input.json as a starting point for your own testing input. Paste the contents of policy.rego into the left-hand side of the playground, and the contents of input.json into the "Input" section. Click "Evaluate" and you should see something like the following in the output.
|
||||
|
||||
```json
|
||||
{
|
||||
"allow": true,
|
||||
"check_scope_allow_list": true,
|
||||
"org": 0,
|
||||
"org_member": 0,
|
||||
"org_memberships": [],
|
||||
"permission_allow": true,
|
||||
"role_allow": true,
|
||||
"scope_allow": true,
|
||||
"scope_org": 0,
|
||||
"scope_org_member": 0,
|
||||
"scope_site": 1,
|
||||
"scope_user": 0,
|
||||
"site": 1,
|
||||
"user": 0
|
||||
}
|
||||
```
|
||||
|
||||
## Levels
|
||||
|
||||
Permissions are evaluated at four levels: site, user, org, org_member.
|
||||
|
||||
For each level, two checks are performed:
|
||||
- Do the subject's permissions allow them to perform this action?
|
||||
- Does the subject's scope allow them to perform this action?
|
||||
|
||||
Each of these checks gets a "vote", which must be one of three values:
|
||||
- -1 to deny (usually because of a negative permission)
|
||||
- 0 to abstain (no matching permission)
|
||||
- 1 to allow
|
||||
|
||||
If a level abstains, then the decision gets deferred to the next level. When
|
||||
there is no "next" level to defer to, it is equivalent to being denied.
|
||||
|
||||
### Scope
|
||||
Additionally, each input has a "scope" that can be thought of as a second set of permissions, where each permission belongs to one of the four levels — exactly the same as role permissions. An action is only allowed if it is allowed by both the subject's permissions _and_ their current scope. This is to allow issuing tokens for a subject that have only a subset of the full subject's permissions.
|
||||
|
||||
For example, you may have a scope like...
|
||||
|
||||
```json
|
||||
{
|
||||
"by_org_id": {
|
||||
"<org_id>": {
|
||||
"member": [{ "resource_type": "workspace", "action": "*" }]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
...to limit the token to only accessing workspaces owned by the user within a specific org. This provides some assurances for an admin user, that the token can only access intended resources, rather than having full access to everything.
|
||||
|
||||
The final policy decision is determined by evaluating each of these checks in their proper precedence order from the `allow` rule.
|
||||
|
||||
## Unknown values
|
||||
|
||||
This policy is specifically constructed to compress to a set of queries if 'input.object.owner' and 'input.object.org_owner' are unknown. There is no specific set of rules that will guarantee that this policy has this property, however, there are some tricks. We have tests that enforce this property, so any changes that pass the tests will be okay.
|
||||
|
||||
Some general rules to follow:
|
||||
|
||||
1. Do not use unknown values in any [comprehensions](https://www.openpolicyagent.org/docs/latest/policy-language/#comprehensions) or iterations.
|
||||
|
||||
2. Use the unknown values as minimally as possible.
|
||||
|
||||
3. Avoid making code branches based on the value of the unknown field.
|
||||
|
||||
Unknown values are like a "set" of possible values (which is why rule 1 usually breaks things).
|
||||
|
||||
For example, in the org level rules, we calculate the "vote" for all orgs, rather than just the `input.object.org_owner`. This way, if the `org_owner` changes, then we don't need to recompute any votes; we already have it for the changed value. This means we don't need branching, because the end result is just a lookup table.
|
||||
+83
-35
@@ -58,22 +58,68 @@ This can be represented by the following truth table, where Y represents _positi
|
||||
- `+site.app.*.read`: allowed to perform the `read` action against all objects of type `app` in a given Coder deployment.
|
||||
- `-user.workspace.*.create`: user is not allowed to create workspaces.
|
||||
|
||||
## Levels
|
||||
|
||||
A user can be given (or deprived) a permission at several levels. Currently,
|
||||
those levels are:
|
||||
|
||||
- Site-wide level
|
||||
- Organization level
|
||||
- User level
|
||||
- Organization member level
|
||||
|
||||
The site-wide level is the most authoritative. Any permission granted or denied at the site-wide level is absolute. After checking the site-wide level, depending on whether the resource is owned by an organization or not, it will check the other levels.
|
||||
|
||||
- If the resource is owned by an organization, the next most authoritative level is the organization level. It acts like the site-wide level, but only for resources within the corresponding organization. The user can use that permission on any resource within that organization.
|
||||
- After the organization level is the member level. This level only applies to resources that are owned by both the organization _and_ the user.
|
||||
|
||||
- If the resource is not owned by an organization, the next level to check is the user level. This level only applies to resources owned by the user and that are not owned by any organization.
|
||||
|
||||
```
|
||||
┌──────────┐
|
||||
│ Site │
|
||||
└─────┬────┘
|
||||
┌──────────┴───────────┐
|
||||
┌──┤ Owned by an org? ├──┐
|
||||
│ └──────────────────────┘ │
|
||||
┌──┴──┐ ┌──┴─┐
|
||||
│ Yes │ │ No │
|
||||
└──┬──┘ └──┬─┘
|
||||
┌────────┴─────────┐ ┌─────┴────┐
|
||||
│ Organization │ │ User │
|
||||
└────────┬─────────┘ └──────────┘
|
||||
┌─────┴──────┐
|
||||
│ Member │
|
||||
└────────────┘
|
||||
```
|
||||
|
||||
## Roles
|
||||
|
||||
A _role_ is a set of permissions. When evaluating a role's permission to form an action, all the relevant permissions for the role are combined at each level. Permissions at a higher level override permissions at a lower level.
|
||||
|
||||
The following table shows the per-level role evaluation.
|
||||
Y indicates that the role provides positive permissions, N indicates the role provides negative permissions, and _indicates the role does not provide positive or negative permissions. YN_ indicates that the value in the cell does not matter for the access result.
|
||||
The following tables show the per-level role evaluation. Y indicates that the role provides positive permissions, N indicates the role provides negative permissions, and _ indicates the role does not provide positive or negative permissions. YN\_ indicates that the value in the cell does not matter for the access result. The table varies depending on if the resource belongs to an organization or not.
|
||||
|
||||
| Role (example) | Site | Org | User | Result |
|
||||
|-----------------|------|------|------|--------|
|
||||
| site-admin | Y | YN\_ | YN\_ | Y |
|
||||
| no-permission | N | YN\_ | YN\_ | N |
|
||||
| org-admin | \_ | Y | YN\_ | Y |
|
||||
| non-org-member | \_ | N | YN\_ | N |
|
||||
| user | \_ | \_ | Y | Y |
|
||||
| | \_ | \_ | N | N |
|
||||
| unauthenticated | \_ | \_ | \_ | N |
|
||||
If the resource is owned by an organization, such as a template or a workspace:
|
||||
|
||||
| Role (example) | Site | Org | OrgMember | Result |
|
||||
|--------------------------|------|------|-----------|--------|
|
||||
| site-admin | Y | YN\_ | YN\_ | Y |
|
||||
| negative-site-permission | N | YN\_ | YN\_ | N |
|
||||
| org-admin | \_ | Y | YN\_ | Y |
|
||||
| non-org-member | \_ | N | YN\_ | N |
|
||||
| member-owned | \_ | \_ | Y | Y |
|
||||
| not-member-owned | \_ | \_ | N | N |
|
||||
| unauthenticated | \_ | \_ | \_ | N |
|
||||
|
||||
If the resource is not owned by an organization:
|
||||
|
||||
| Role (example) | Site | User | Result |
|
||||
|--------------------------|------|------|--------|
|
||||
| site-admin | Y | YN\_ | Y |
|
||||
| negative-site-permission | N | YN\_ | N |
|
||||
| user-owned | \_ | Y | Y |
|
||||
| not-user-owned | \_ | N | N |
|
||||
| unauthenticated | \_ | \_ | N |
|
||||
|
||||
## Scopes
|
||||
|
||||
@@ -91,15 +137,17 @@ The use case for specifying this type of permission in a role is limited, and do
|
||||
Example of a scope for a workspace agent token, using an `allow_list` containing a single resource id.
|
||||
|
||||
```javascript
|
||||
"scope": {
|
||||
"name": "workspace_agent",
|
||||
"display_name": "Workspace_Agent",
|
||||
// The ID of the given workspace the agent token correlates to.
|
||||
"allow_list": ["10d03e62-7703-4df5-a358-4f76577d4e2f"],
|
||||
"site": [/* ... perms ... */],
|
||||
"org": {/* ... perms ... */},
|
||||
"user": [/* ... perms ... */]
|
||||
}
|
||||
{
|
||||
"scope": {
|
||||
"name": "workspace_agent",
|
||||
"display_name": "Workspace_Agent",
|
||||
// The ID of the given workspace the agent token correlates to.
|
||||
"allow_list": ["10d03e62-7703-4df5-a358-4f76577d4e2f"],
|
||||
"site": [/* ... perms ... */],
|
||||
"org": {/* ... perms ... */},
|
||||
"user": [/* ... perms ... */]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## OPA (Open Policy Agent)
|
||||
@@ -124,31 +172,31 @@ To learn more about OPA and Rego, see https://www.openpolicyagent.org/docs.
|
||||
There are two types of evaluation in OPA:
|
||||
|
||||
- **Full evaluation**: Produces a decision that can be enforced.
|
||||
This is the default evaluation mode, where OPA evaluates the policy using `input` data that contains all known values and returns output data with the `allow` variable.
|
||||
This is the default evaluation mode, where OPA evaluates the policy using `input` data that contains all known values and returns output data with the `allow` variable.
|
||||
- **Partial evaluation**: Produces a new policy that can be evaluated later when the _unknowns_ become _known_.
|
||||
This is an optimization in OPA where it evaluates as much of the policy as possible without resolving expressions that depend on _unknown_ values from the `input`.
|
||||
To learn more about partial evaluation, see this [OPA blog post](https://blog.openpolicyagent.org/partial-evaluation-162750eaf422).
|
||||
This is an optimization in OPA where it evaluates as much of the policy as possible without resolving expressions that depend on _unknown_ values from the `input`.
|
||||
To learn more about partial evaluation, see this [OPA blog post](https://blog.openpolicyagent.org/partial-evaluation-162750eaf422).
|
||||
|
||||
Application of Full and Partial evaluation in `rbac` package:
|
||||
|
||||
- **Full Evaluation** is handled by the `RegoAuthorizer.Authorize()` method in [`authz.go`](authz.go).
|
||||
This method determines whether a subject (user) can perform a specific action on an object.
|
||||
It performs a full evaluation of the Rego policy, which returns the `allow` variable to decide whether access is granted (`true`) or denied (`false` or undefined).
|
||||
This method determines whether a subject (user) can perform a specific action on an object.
|
||||
It performs a full evaluation of the Rego policy, which returns the `allow` variable to decide whether access is granted (`true`) or denied (`false` or undefined).
|
||||
- **Partial Evaluation** is handled by the `RegoAuthorizer.Prepare()` method in [`authz.go`](authz.go).
|
||||
This method compiles OPA’s partial evaluation queries into `SQL WHERE` clauses.
|
||||
These clauses are then used to enforce authorization directly in database queries, rather than in application code.
|
||||
This method compiles OPA’s partial evaluation queries into `SQL WHERE` clauses.
|
||||
These clauses are then used to enforce authorization directly in database queries, rather than in application code.
|
||||
|
||||
Authorization Patterns:
|
||||
|
||||
- Fetch-then-authorize: an object is first retrieved from the database, and a single authorization check is performed using full evaluation via `Authorize()`.
|
||||
- Authorize-while-fetching: Partial evaluation via `Prepare()` is used to inject SQL filters directly into queries, allowing efficient authorization of many objects of the same type.
|
||||
`dbauthz` methods that enforce authorization directly in the SQL query are prefixed with `Authorized`, for example, `GetAuthorizedWorkspaces`.
|
||||
`dbauthz` methods that enforce authorization directly in the SQL query are prefixed with `Authorized`, for example, `GetAuthorizedWorkspaces`.
|
||||
|
||||
## Testing
|
||||
|
||||
- OPA Playground: https://play.openpolicyagent.org/
|
||||
- OPA CLI (`opa eval`): useful for experimenting with different inputs and understanding how the policy behaves under various conditions.
|
||||
`opa eval` returns the constraints that must be satisfied for a rule to evaluate to `true`.
|
||||
`opa eval` returns the constraints that must be satisfied for a rule to evaluate to `true`.
|
||||
- `opa eval` requires an `input.json` file containing the input data to run the policy against.
|
||||
You can generate this file using the [gen_input.go](../../scripts/rbac-authz/gen_input.go) script.
|
||||
Note: the script currently produces a fixed input. You may need to tweak it for your specific use case.
|
||||
@@ -196,12 +244,12 @@ The script [`benchmark_authz.sh`](../../scripts/rbac-authz/benchmark_authz.sh) r
|
||||
|
||||
- To run benchmark on the current branch:
|
||||
|
||||
```bash
|
||||
benchmark_authz.sh --single
|
||||
```
|
||||
```bash
|
||||
benchmark_authz.sh --single
|
||||
```
|
||||
|
||||
- To compare benchmarks between 2 branches:
|
||||
|
||||
```bash
|
||||
benchmark_authz.sh --compare main prebuild_policy
|
||||
```
|
||||
```bash
|
||||
benchmark_authz.sh --compare main prebuild_policy
|
||||
```
|
||||
|
||||
@@ -165,6 +165,10 @@ func (role Role) regoValue() ast.Value {
|
||||
ast.StringTerm("org"),
|
||||
ast.NewTerm(regoSlice(p.Org)),
|
||||
},
|
||||
[2]*ast.Term{
|
||||
ast.StringTerm("member"),
|
||||
ast.NewTerm(regoSlice(p.Member)),
|
||||
},
|
||||
),
|
||||
))
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user