Compare commits
16 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 097e085fcb | |||
| b8ab2d351f | |||
| 1b1e3cb706 | |||
| ea0aca0f26 | |||
| 563612eb3b | |||
| fa43ea8e68 | |||
| d82ba7e3a4 | |||
| cb4ea1f397 | |||
| effbe4e52e | |||
| 6424093146 | |||
| 2cf4b5c5a2 | |||
| a7b3efb540 | |||
| 0b5542f933 | |||
| ba14acf4e8 | |||
| d0a2e6d603 | |||
| 2a22440b0e |
@@ -27,7 +27,5 @@ ignorePatterns:
|
||||
- pattern: "splunk.com"
|
||||
- pattern: "stackoverflow.com/questions"
|
||||
- pattern: "developer.hashicorp.com/terraform/language"
|
||||
- pattern: "platform.openai.com/docs/api-reference"
|
||||
- pattern: "api.openai.com"
|
||||
aliveStatusCodes:
|
||||
- 200
|
||||
|
||||
@@ -5,13 +5,6 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
# uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
# with:
|
||||
# sqlc-version: "1.30.0"
|
||||
|
||||
# Switched to coder/sqlc fork to fix ambiguous column bug, see:
|
||||
# - https://github.com/coder/sqlc/pull/1
|
||||
# - https://github.com/sqlc-dev/sqlc/pull/4159
|
||||
shell: bash
|
||||
run: |
|
||||
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
with:
|
||||
sqlc-version: "1.27.0"
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.13.4
|
||||
terraform_version: 1.13.0
|
||||
terraform_wrapper: false
|
||||
|
||||
+21
-23
@@ -35,7 +35,7 @@ jobs:
|
||||
tailnet-integration: ${{ steps.filter.outputs.tailnet-integration }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -157,7 +157,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -191,7 +191,7 @@ jobs:
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@07d900b8fa1097806b8adb6391b0d3e0ac2fdea7 # v1.39.0
|
||||
uses: crate-ci/typos@80c8a4945eec0f6d464eaf9e65ed98ef085283d1 # v1.38.1
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
@@ -230,12 +230,12 @@ jobs:
|
||||
shell: bash
|
||||
|
||||
gen:
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 8
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -271,7 +271,6 @@ jobs:
|
||||
popd
|
||||
|
||||
- name: make gen
|
||||
timeout-minutes: 8
|
||||
run: |
|
||||
# Remove golden files to detect discrepancy in generated files.
|
||||
make clean/golden-files
|
||||
@@ -289,10 +288,10 @@ jobs:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 7
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -316,7 +315,6 @@ jobs:
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
|
||||
- name: make fmt
|
||||
timeout-minutes: 7
|
||||
run: |
|
||||
PATH="${PATH}:$(go env GOPATH)/bin" \
|
||||
make --output-sync -j -B fmt
|
||||
@@ -343,7 +341,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -532,7 +530,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -581,7 +579,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -641,7 +639,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -668,7 +666,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -701,7 +699,7 @@ jobs:
|
||||
name: ${{ matrix.variant.name }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -773,7 +771,7 @@ jobs:
|
||||
if: needs.changes.outputs.site == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -794,7 +792,7 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@ac86f2ff0a458ffbce7b40698abd44c0fa34d4b6 # v13.3.3
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -826,7 +824,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@ac86f2ff0a458ffbce7b40698abd44c0fa34d4b6 # v13.3.3
|
||||
uses: chromaui/action@bc2d84ad2b60813a67d995c5582d696104a19383 # v13.3.2
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -854,7 +852,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -925,7 +923,7 @@ jobs:
|
||||
if: always()
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1045,7 +1043,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1100,7 +1098,7 @@ jobs:
|
||||
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1497,7 +1495,7 @@ jobs:
|
||||
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
packages: write # to retag image as dogfood
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -92,7 +92,7 @@ jobs:
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Set up Flux CLI
|
||||
uses: fluxcd/flux2/action@b6e76ca2534f76dcb8dd94fb057cdfa923c3b641 # v2.7.3
|
||||
uses: fluxcd/flux2/action@4a15fa6a023259353ef750acf1c98fe88407d4d0 # v2.7.2
|
||||
with:
|
||||
# Keep this and the github action up to date with the version of flux installed in dogfood cluster
|
||||
version: "2.7.0"
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
needs: deploy
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ jobs:
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ jobs:
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: tj-actions/changed-files@70069877f29101175ed2b055d210fe8b1d54d7d7 # v45.0.7
|
||||
- uses: tj-actions/changed-files@dbf178ceecb9304128c8e0648591d71208c6e2c9 # v45.0.7
|
||||
id: changed-files
|
||||
with:
|
||||
files: |
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -125,7 +125,7 @@ jobs:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -184,7 +184,7 @@ jobs:
|
||||
pull-requests: write # needed for commenting on PRs
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -228,7 +228,7 @@ jobs:
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -288,7 +288,7 @@ jobs:
|
||||
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -164,7 +164,7 @@ jobs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -802,7 +802,7 @@ jobs:
|
||||
# TODO: skip this if it's not a new release (i.e. a backport). This is
|
||||
# fine right now because it just makes a PR that we can close.
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -878,7 +878,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -971,7 +971,7 @@ jobs:
|
||||
if: ${{ !inputs.dry_run }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -47,6 +47,6 @@ jobs:
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
||||
@@ -27,7 +27,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||
uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||
uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -154,7 +154,7 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
@@ -18,7 +18,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -120,12 +120,12 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Delete PR Cleanup workflow runs
|
||||
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
|
||||
uses: Mattraks/delete-workflow-runs@ab482449ba468316e9a8801e092d0405715c5e6d # v2.1.0
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
|
||||
@@ -17,8 +17,8 @@ on:
|
||||
type: string
|
||||
template_preset:
|
||||
description: "Template preset to use"
|
||||
required: false
|
||||
default: ""
|
||||
required: true
|
||||
default: "none"
|
||||
type: string
|
||||
prefix:
|
||||
description: "Prefix for workspace name"
|
||||
@@ -67,7 +67,7 @@ jobs:
|
||||
GITHUB_EVENT_USER_LOGIN: ${{ github.event.sender.login }}
|
||||
INPUTS_ISSUE_URL: ${{ inputs.issue_url }}
|
||||
INPUTS_TEMPLATE_NAME: ${{ inputs.template_name || 'coder' }}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || ''}}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || 'none'}}
|
||||
INPUTS_PREFIX: ${{ inputs.prefix || 'traiage' }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Extract context key and description from issue
|
||||
- name: Extract context key from issue
|
||||
id: extract-context
|
||||
env:
|
||||
ISSUE_URL: ${{ steps.determine-inputs.outputs.issue_url }}
|
||||
@@ -132,59 +132,86 @@ jobs:
|
||||
run: |
|
||||
issue_number="$(gh issue view "${ISSUE_URL}" --json number --jq '.number')"
|
||||
context_key="gh-${issue_number}"
|
||||
|
||||
TASK_PROMPT=$(cat <<EOF
|
||||
Fix ${ISSUE_URL}
|
||||
|
||||
1. Use the gh CLI to read the issue description and comments.
|
||||
2. Think carefully and try to understand the root cause. If the issue is unclear or not well defined, ask me to clarify and provide more information.
|
||||
3. Write a proposed implementation plan to PLAN.md for me to review before starting implementation. Your plan should use TDD and only make the minimal changes necessary to fix the root cause.
|
||||
4. When I approve your plan, start working on it. If you encounter issues with the plan, ask me for clarification and update the plan as required.
|
||||
5. When you have finished implementation according to the plan, commit and push your changes, and create a PR using the gh CLI for me to review.
|
||||
|
||||
EOF
|
||||
)
|
||||
|
||||
echo "context_key=${context_key}" >> "${GITHUB_OUTPUT}"
|
||||
{
|
||||
echo "TASK_PROMPT<<EOF"
|
||||
echo "${TASK_PROMPT}"
|
||||
echo "EOF"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
echo "CONTEXT_KEY=${context_key}" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Download and install Coder binary
|
||||
shell: bash
|
||||
env:
|
||||
CODER_URL: ${{ secrets.TRAIAGE_CODER_URL }}
|
||||
run: |
|
||||
if [ "${{ runner.arch }}" == "ARM64" ]; then
|
||||
ARCH="arm64"
|
||||
else
|
||||
ARCH="amd64"
|
||||
fi
|
||||
mkdir -p "${HOME}/.local/bin"
|
||||
curl -fsSL --compressed "$CODER_URL/bin/coder-linux-${ARCH}" -o "${HOME}/.local/bin/coder"
|
||||
chmod +x "${HOME}/.local/bin/coder"
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
coder version
|
||||
coder whoami
|
||||
echo "$HOME/.local/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Get Coder username from GitHub actor
|
||||
id: get-coder-username
|
||||
env:
|
||||
CODER_SESSION_TOKEN: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
GITHUB_USER_ID: ${{ steps.determine-inputs.outputs.github_user_id }}
|
||||
run: |
|
||||
user_json=$(
|
||||
coder users list --github-user-id="${GITHUB_USER_ID}" --output=json
|
||||
)
|
||||
coder_username=$(jq -r 'first | .username' <<< "$user_json")
|
||||
[[ -z "${coder_username}" || "${coder_username}" == "null" ]] && echo "No Coder user with GitHub user ID ${GITHUB_USER_ID} found" && exit 1
|
||||
echo "coder_username=${coder_username}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
persist-credentials: false
|
||||
ref: main
|
||||
repository: coder/create-task-action
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create Coder Task
|
||||
id: create_task
|
||||
uses: ./.github/actions/create-task-action
|
||||
with:
|
||||
coder-url: ${{ secrets.TRAIAGE_CODER_URL }}
|
||||
coder-token: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
|
||||
coder-organization: "default"
|
||||
coder-template-name: coder
|
||||
coder-template-preset: ${{ steps.determine-inputs.outputs.template_preset }}
|
||||
coder-task-name-prefix: gh-coder
|
||||
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
|
||||
github-user-id: ${{ steps.determine-inputs.outputs.github_user_id }}
|
||||
github-token: ${{ github.token }}
|
||||
github-issue-url: ${{ steps.determine-inputs.outputs.issue_url }}
|
||||
comment-on-issue: ${{ startsWith(steps.determine-inputs.outputs.issue_url, format('{0}/{1}', github.server_url, github.repository)) }}
|
||||
|
||||
- name: Write outputs
|
||||
# TODO(Cian): this is a good use-case for 'recipes'
|
||||
- name: Create Coder task
|
||||
id: create-task
|
||||
env:
|
||||
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
TASK_URL: ${{ steps.create_task.outputs.task-url }}
|
||||
CODER_USERNAME: ${{ steps.get-coder-username.outputs.coder_username }}
|
||||
CONTEXT_KEY: ${{ steps.extract-context.outputs.context_key }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
ISSUE_URL: ${{ steps.determine-inputs.outputs.issue_url }}
|
||||
PREFIX: ${{ steps.determine-inputs.outputs.prefix }}
|
||||
RUN_ID: ${{ github.run_id }}
|
||||
TEMPLATE_NAME: ${{ steps.determine-inputs.outputs.template_name }}
|
||||
TEMPLATE_PARAMETERS: ${{ secrets.TRAIAGE_TEMPLATE_PARAMETERS }}
|
||||
TEMPLATE_PRESET: ${{ steps.determine-inputs.outputs.template_preset }}
|
||||
run: |
|
||||
{
|
||||
echo "**Task created:** ${TASK_CREATED}"
|
||||
echo "**Task name:** ${TASK_NAME}"
|
||||
echo "**Task URL**: ${TASK_URL}"
|
||||
} >> "${GITHUB_STEP_SUMMARY}"
|
||||
# Fetch issue description using `gh` CLI
|
||||
#shellcheck disable=SC2016 # The template string should not be subject to shell expansion
|
||||
issue_description=$(gh issue view "${ISSUE_URL}" \
|
||||
--json 'title,body,comments' \
|
||||
--template '{{printf "%s\n\n%s\n\nComments:\n" .title .body}}{{range $k, $v := .comments}} - {{index $v.author "login"}}: {{printf "%s\n" $v.body}}{{end}}')
|
||||
|
||||
# Write a prompt to PROMPT_FILE
|
||||
PROMPT=$(cat <<EOF
|
||||
Fix ${ISSUE_URL}
|
||||
|
||||
Analyze the below GitHub issue description, understand the root cause, and make appropriate changes to resolve the issue.
|
||||
---
|
||||
${issue_description}
|
||||
EOF
|
||||
)
|
||||
export PROMPT
|
||||
|
||||
export TASK_NAME="${PREFIX}-${CONTEXT_KEY}-${RUN_ID}"
|
||||
echo "Creating task: $TASK_NAME"
|
||||
./scripts/traiage.sh create
|
||||
if [[ "${ISSUE_URL}" == "https://github.com/${GITHUB_REPOSITORY}"* ]]; then
|
||||
gh issue comment "${ISSUE_URL}" --body "Task created: https://dev.coder.com/tasks/${CODER_USERNAME}/${TASK_NAME}" --create-if-none --edit-last
|
||||
else
|
||||
echo "Skipping comment on other repo."
|
||||
fi
|
||||
echo "TASK_NAME=${CODER_USERNAME}/${TASK_NAME}" >> "${GITHUB_OUTPUT}"
|
||||
echo "TASK_NAME=${CODER_USERNAME}/${TASK_NAME}" >> "${GITHUB_ENV}"
|
||||
|
||||
@@ -21,7 +21,7 @@ jobs:
|
||||
pull-requests: write # required to post PR review comments by the action
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -91,6 +91,3 @@ __debug_bin*
|
||||
**/.claude/settings.local.json
|
||||
|
||||
/.env
|
||||
|
||||
# Ignore plans written by AI agents.
|
||||
PLAN.md
|
||||
|
||||
+1
-3
@@ -250,9 +250,7 @@ func (a *agent) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
transforms[i] = replace.String(edit.Search, edit.Replace)
|
||||
}
|
||||
|
||||
// Create an adjacent file to ensure it will be on the same device and can be
|
||||
// moved atomically.
|
||||
tmpfile, err := afero.TempFile(a.filesystem, filepath.Dir(path), filepath.Base(path))
|
||||
tmpfile, err := afero.TempFile(a.filesystem, "", filepath.Base(path))
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
|
||||
+17
-45
@@ -11,7 +11,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -202,15 +201,18 @@ func workspaceAgent() *serpent.Command {
|
||||
// Enable pprof handler
|
||||
// This prevents the pprof import from being accidentally deleted.
|
||||
_ = pprof.Handler
|
||||
if pprofAddress != "" {
|
||||
pprofSrvClose := ServeHandler(ctx, logger, nil, pprofAddress, "pprof")
|
||||
defer pprofSrvClose()
|
||||
pprofSrvClose := ServeHandler(ctx, logger, nil, pprofAddress, "pprof")
|
||||
defer pprofSrvClose()
|
||||
if port, err := extractPort(pprofAddress); err == nil {
|
||||
ignorePorts[port] = "pprof"
|
||||
}
|
||||
|
||||
if port, err := extractPort(pprofAddress); err == nil {
|
||||
ignorePorts[port] = "pprof"
|
||||
}
|
||||
} else {
|
||||
logger.Debug(ctx, "pprof address is empty, disabling pprof server")
|
||||
if port, err := extractPort(prometheusAddress); err == nil {
|
||||
ignorePorts[port] = "prometheus"
|
||||
}
|
||||
|
||||
if port, err := extractPort(debugAddress); err == nil {
|
||||
ignorePorts[port] = "debug"
|
||||
}
|
||||
|
||||
executablePath, err := os.Executable()
|
||||
@@ -274,28 +276,6 @@ func workspaceAgent() *serpent.Command {
|
||||
for {
|
||||
prometheusRegistry := prometheus.NewRegistry()
|
||||
|
||||
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
|
||||
var serverClose []func()
|
||||
if prometheusAddress != "" {
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promHandler, prometheusAddress, "prometheus")
|
||||
serverClose = append(serverClose, prometheusSrvClose)
|
||||
|
||||
if port, err := extractPort(prometheusAddress); err == nil {
|
||||
ignorePorts[port] = "prometheus"
|
||||
}
|
||||
} else {
|
||||
logger.Debug(ctx, "prometheus address is empty, disabling prometheus server")
|
||||
}
|
||||
|
||||
if debugAddress != "" {
|
||||
// ServerHandle depends on `agnt.HTTPDebug()`, but `agnt`
|
||||
// depends on `ignorePorts`. Keep this if statement in sync
|
||||
// with below.
|
||||
if port, err := extractPort(debugAddress); err == nil {
|
||||
ignorePorts[port] = "debug"
|
||||
}
|
||||
}
|
||||
|
||||
agnt := agent.New(agent.Options{
|
||||
Client: client,
|
||||
Logger: logger,
|
||||
@@ -319,15 +299,10 @@ func workspaceAgent() *serpent.Command {
|
||||
},
|
||||
})
|
||||
|
||||
if debugAddress != "" {
|
||||
// ServerHandle depends on `agnt.HTTPDebug()`, but `agnt`
|
||||
// depends on `ignorePorts`. Keep this if statement in sync
|
||||
// with above.
|
||||
debugSrvClose := ServeHandler(ctx, logger, agnt.HTTPDebug(), debugAddress, "debug")
|
||||
serverClose = append(serverClose, debugSrvClose)
|
||||
} else {
|
||||
logger.Debug(ctx, "debug address is empty, disabling debug server")
|
||||
}
|
||||
promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger)
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promHandler, prometheusAddress, "prometheus")
|
||||
|
||||
debugSrvClose := ServeHandler(ctx, logger, agnt.HTTPDebug(), debugAddress, "debug")
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -339,11 +314,8 @@ func workspaceAgent() *serpent.Command {
|
||||
}
|
||||
|
||||
lastErr = agnt.Close()
|
||||
|
||||
slices.Reverse(serverClose)
|
||||
for _, closeFunc := range serverClose {
|
||||
closeFunc()
|
||||
}
|
||||
debugSrvClose()
|
||||
prometheusSrvClose()
|
||||
|
||||
if mustExit {
|
||||
break
|
||||
|
||||
@@ -178,51 +178,6 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
require.Greater(t, atomic.LoadInt64(&called), int64(0), "expected coderd to be reached with custom headers")
|
||||
require.Greater(t, atomic.LoadInt64(&derpCalled), int64(0), "expected /derp to be called with custom headers")
|
||||
})
|
||||
|
||||
t.Run("DisabledServers", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", r.AgentToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
"--pprof-address", "",
|
||||
"--prometheus-address", "",
|
||||
"--debug-address", "",
|
||||
)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
|
||||
// Verify the agent is connected and working.
|
||||
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
|
||||
MatchResources(matchAgentWithVersion).Wait()
|
||||
require.Len(t, resources, 1)
|
||||
require.Len(t, resources[0].Agents, 1)
|
||||
require.NotEmpty(t, resources[0].Agents[0].Version)
|
||||
|
||||
// Verify the servers are not listening by checking the log for disabled
|
||||
// messages.
|
||||
require.Eventually(t, func() bool {
|
||||
logContent, err := os.ReadFile(filepath.Join(logDir, "coder-agent.log"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
logStr := string(logContent)
|
||||
return strings.Contains(logStr, "pprof address is empty, disabling pprof server") &&
|
||||
strings.Contains(logStr, "prometheus address is empty, disabling prometheus server") &&
|
||||
strings.Contains(logStr, "debug address is empty, disabling debug server")
|
||||
}, testutil.WaitLong, testutil.IntervalMedium)
|
||||
})
|
||||
}
|
||||
|
||||
func matchAgentWithVersion(rs []codersdk.WorkspaceResource) bool {
|
||||
|
||||
+59
-86
@@ -384,88 +384,6 @@ func (s *scaletestPrometheusFlags) attach(opts *serpent.OptionSet) {
|
||||
)
|
||||
}
|
||||
|
||||
// workspaceTargetFlags holds common flags for targeting specific workspaces in scale tests.
|
||||
type workspaceTargetFlags struct {
|
||||
template string
|
||||
targetWorkspaces string
|
||||
useHostLogin bool
|
||||
}
|
||||
|
||||
// attach adds the workspace target flags to the given options set.
|
||||
func (f *workspaceTargetFlags) attach(opts *serpent.OptionSet) {
|
||||
*opts = append(*opts,
|
||||
serpent.Option{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&f.template),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&f.targetWorkspaces),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&f.useHostLogin),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// getTargetedWorkspaces retrieves the workspaces based on the template filter and target range. warnWriter is where to
|
||||
// write a warning message if any workspaces were skipped due to ownership mismatch.
|
||||
func (f *workspaceTargetFlags) getTargetedWorkspaces(ctx context.Context, client *codersdk.Client, organizationIDs []uuid.UUID, warnWriter io.Writer) ([]codersdk.Workspace, error) {
|
||||
// Validate template if provided
|
||||
if f.template != "" {
|
||||
_, err := parseTemplate(ctx, client, organizationIDs, f.template)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse target range
|
||||
targetStart, targetEnd, err := parseTargetRange("workspaces", f.targetWorkspaces)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse target workspaces: %w", err)
|
||||
}
|
||||
|
||||
// Determine owner based on useHostLogin
|
||||
var owner string
|
||||
if f.useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
// Get workspaces
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(ctx, client, owner, f.template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(warnWriter, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
// Adjust targetEnd if not specified
|
||||
if targetEnd == 0 {
|
||||
targetEnd = len(workspaces)
|
||||
}
|
||||
|
||||
// Validate range
|
||||
if len(workspaces) == 0 {
|
||||
return nil, xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetEnd > len(workspaces) {
|
||||
return nil, xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetEnd, len(workspaces))
|
||||
}
|
||||
|
||||
// Return the sliced workspaces
|
||||
return workspaces[targetStart:targetEnd], nil
|
||||
}
|
||||
|
||||
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
|
||||
me, err := client.User(ctx, codersdk.Me)
|
||||
if err != nil {
|
||||
@@ -1275,10 +1193,12 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
disableDirect bool
|
||||
useHostLogin bool
|
||||
app string
|
||||
template string
|
||||
targetWorkspaces string
|
||||
workspaceProxyURL string
|
||||
|
||||
targetFlags = &workspaceTargetFlags{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = newScaletestCleanupStrategy()
|
||||
@@ -1323,9 +1243,15 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
workspaces, err := targetFlags.getTargetedWorkspaces(ctx, client, me.OrganizationIDs, inv.Stdout)
|
||||
if template != "" {
|
||||
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
targetWorkspaceStart, targetWorkspaceEnd, err := parseTargetRange("workspaces", targetWorkspaces)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("parse target workspaces: %w", err)
|
||||
}
|
||||
|
||||
appHost, err := client.AppHost(ctx)
|
||||
@@ -1333,6 +1259,30 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
return xerrors.Errorf("get app host: %w", err)
|
||||
}
|
||||
|
||||
var owner string
|
||||
if useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(inv.Context(), client, owner, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(inv.Stdout, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
if targetWorkspaceEnd == 0 {
|
||||
targetWorkspaceEnd = len(workspaces)
|
||||
}
|
||||
|
||||
if len(workspaces) == 0 {
|
||||
return xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
if targetWorkspaceEnd > len(workspaces) {
|
||||
return xerrors.Errorf("target workspace end %d is greater than the number of workspaces %d", targetWorkspaceEnd, len(workspaces))
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
@@ -1357,6 +1307,10 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
|
||||
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
|
||||
for idx, ws := range workspaces {
|
||||
if idx < targetWorkspaceStart || idx >= targetWorkspaceEnd {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
agent codersdk.WorkspaceAgent
|
||||
name = "workspace-traffic"
|
||||
@@ -1461,6 +1415,19 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
}
|
||||
|
||||
cmd.Options = []serpent.Option{
|
||||
{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: serpent.StringOf(&template),
|
||||
},
|
||||
{
|
||||
Flag: "target-workspaces",
|
||||
Env: "CODER_SCALETEST_TARGET_WORKSPACES",
|
||||
Description: "Target a specific range of workspaces in the format [START]:[END] (exclusive). Example: 0:10 will target the 10 first alphabetically sorted workspaces (0-9).",
|
||||
Value: serpent.StringOf(&targetWorkspaces),
|
||||
},
|
||||
{
|
||||
Flag: "bytes-per-tick",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
|
||||
@@ -1496,6 +1463,13 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
Description: "Send WebSocket traffic to a workspace app (proxied via coderd), cannot be used with --ssh.",
|
||||
Value: serpent.StringOf(&app),
|
||||
},
|
||||
{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&useHostLogin),
|
||||
},
|
||||
{
|
||||
Flag: "workspace-proxy-url",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_PROXY_URL",
|
||||
@@ -1505,7 +1479,6 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
},
|
||||
}
|
||||
|
||||
targetFlags.attach(&cmd.Options)
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
strategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -30,13 +29,12 @@ import (
|
||||
|
||||
func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
var (
|
||||
userCount int64
|
||||
templateAdminPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
smtpRequestTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
smtpAPIURL string
|
||||
userCount int64
|
||||
ownerUserPercentage float64
|
||||
notificationTimeout time.Duration
|
||||
dialTimeout time.Duration
|
||||
noCleanup bool
|
||||
smtpAPIURL string
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
|
||||
@@ -79,24 +77,24 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
return xerrors.Errorf("--user-count must be greater than 0")
|
||||
}
|
||||
|
||||
if templateAdminPercentage < 0 || templateAdminPercentage > 100 {
|
||||
return xerrors.Errorf("--template-admin-percentage must be between 0 and 100")
|
||||
if ownerUserPercentage < 0 || ownerUserPercentage > 100 {
|
||||
return xerrors.Errorf("--owner-user-percentage must be between 0 and 100")
|
||||
}
|
||||
|
||||
if smtpAPIURL != "" && !strings.HasPrefix(smtpAPIURL, "http://") && !strings.HasPrefix(smtpAPIURL, "https://") {
|
||||
return xerrors.Errorf("--smtp-api-url must start with http:// or https://")
|
||||
}
|
||||
|
||||
templateAdminCount := int64(float64(userCount) * templateAdminPercentage / 100)
|
||||
if templateAdminCount == 0 && templateAdminPercentage > 0 {
|
||||
templateAdminCount = 1
|
||||
ownerUserCount := int64(float64(userCount) * ownerUserPercentage / 100)
|
||||
if ownerUserCount == 0 && ownerUserPercentage > 0 {
|
||||
ownerUserCount = 1
|
||||
}
|
||||
regularUserCount := userCount - templateAdminCount
|
||||
regularUserCount := userCount - ownerUserCount
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Distribution plan:\n")
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Total users: %d\n", userCount)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Template admins: %d (%.1f%%)\n", templateAdminCount, templateAdminPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-templateAdminPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Owner users: %d (%.1f%%)\n", ownerUserCount, ownerUserPercentage)
|
||||
_, _ = fmt.Fprintf(inv.Stderr, " Regular users: %d (%.1f%%)\n", regularUserCount, 100.0-ownerUserPercentage)
|
||||
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
@@ -129,12 +127,13 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Creating users...")
|
||||
|
||||
dialBarrier := &sync.WaitGroup{}
|
||||
templateAdminWatchBarrier := &sync.WaitGroup{}
|
||||
ownerWatchBarrier := &sync.WaitGroup{}
|
||||
dialBarrier.Add(int(userCount))
|
||||
templateAdminWatchBarrier.Add(int(templateAdminCount))
|
||||
ownerWatchBarrier.Add(int(ownerUserCount))
|
||||
|
||||
expectedNotificationIDs := map[uuid.UUID]struct{}{
|
||||
notificationsLib.TemplateTemplateDeleted: {},
|
||||
notificationsLib.TemplateUserAccountCreated: {},
|
||||
notificationsLib.TemplateUserAccountDeleted: {},
|
||||
}
|
||||
|
||||
triggerTimes := make(map[uuid.UUID]chan time.Time, len(expectedNotificationIDs))
|
||||
@@ -143,20 +142,19 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
}
|
||||
|
||||
configs := make([]notifications.Config, 0, userCount)
|
||||
for range templateAdminCount {
|
||||
for range ownerUserCount {
|
||||
config := notifications.Config{
|
||||
User: createusers.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
},
|
||||
Roles: []string{codersdk.RoleTemplateAdmin},
|
||||
Roles: []string{codersdk.RoleOwner},
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
ExpectedNotificationsIDs: expectedNotificationIDs,
|
||||
Metrics: metrics,
|
||||
SMTPApiURL: smtpAPIURL,
|
||||
SMTPRequestTimeout: smtpRequestTimeout,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
@@ -172,8 +170,9 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
NotificationTimeout: notificationTimeout,
|
||||
DialTimeout: dialTimeout,
|
||||
DialBarrier: dialBarrier,
|
||||
ReceivingWatchBarrier: templateAdminWatchBarrier,
|
||||
ReceivingWatchBarrier: ownerWatchBarrier,
|
||||
Metrics: metrics,
|
||||
SMTPApiURL: smtpAPIURL,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
@@ -181,7 +180,7 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
configs = append(configs, config)
|
||||
}
|
||||
|
||||
go triggerNotifications(
|
||||
go triggerUserNotifications(
|
||||
ctx,
|
||||
logger,
|
||||
client,
|
||||
@@ -262,30 +261,23 @@ func (r *RootCmd) scaletestNotifications() *serpent.Command {
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Flag: "template-admin-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TEMPLATE_ADMIN_PERCENTAGE",
|
||||
Flag: "owner-user-percentage",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_OWNER_USER_PERCENTAGE",
|
||||
Default: "20.0",
|
||||
Description: "Percentage of users to assign Template Admin role to (0-100).",
|
||||
Value: serpent.Float64Of(&templateAdminPercentage),
|
||||
Description: "Percentage of users to assign Owner role to (0-100).",
|
||||
Value: serpent.Float64Of(&ownerUserPercentage),
|
||||
},
|
||||
{
|
||||
Flag: "notification-timeout",
|
||||
Env: "CODER_SCALETEST_NOTIFICATION_TIMEOUT",
|
||||
Default: "10m",
|
||||
Default: "5m",
|
||||
Description: "How long to wait for notifications after triggering.",
|
||||
Value: serpent.DurationOf(¬ificationTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "smtp-request-timeout",
|
||||
Env: "CODER_SCALETEST_SMTP_REQUEST_TIMEOUT",
|
||||
Default: "5m",
|
||||
Description: "Timeout for SMTP requests.",
|
||||
Value: serpent.DurationOf(&smtpRequestTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "dial-timeout",
|
||||
Env: "CODER_SCALETEST_DIAL_TIMEOUT",
|
||||
Default: "10m",
|
||||
Default: "2m",
|
||||
Description: "Timeout for dialing the notification websocket endpoint.",
|
||||
Value: serpent.DurationOf(&dialTimeout),
|
||||
},
|
||||
@@ -387,9 +379,9 @@ func computeNotificationLatencies(
|
||||
return nil
|
||||
}
|
||||
|
||||
// triggerNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test template to trigger notification events for testing.
|
||||
func triggerNotifications(
|
||||
// triggerUserNotifications waits for all test users to connect,
|
||||
// then creates and deletes a test user to trigger notification events for testing.
|
||||
func triggerUserNotifications(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
client *codersdk.Client,
|
||||
@@ -422,49 +414,34 @@ func triggerNotifications(
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info(ctx, "creating test template to test notifications")
|
||||
const (
|
||||
triggerUsername = "scaletest-trigger-user"
|
||||
triggerEmail = "scaletest-trigger@example.com"
|
||||
)
|
||||
|
||||
// Upload empty template file.
|
||||
file, err := client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader([]byte{}))
|
||||
if err != nil {
|
||||
logger.Error(ctx, "upload test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template uploaded", slog.F("file_id", file.ID))
|
||||
logger.Info(ctx, "creating test user to test notifications",
|
||||
slog.F("username", triggerUsername),
|
||||
slog.F("email", triggerEmail),
|
||||
slog.F("org_id", orgID))
|
||||
|
||||
// Create template version.
|
||||
version, err := client.CreateTemplateVersion(ctx, orgID, codersdk.CreateTemplateVersionRequest{
|
||||
StorageMethod: codersdk.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Provisioner: codersdk.ProvisionerTypeEcho,
|
||||
testUser, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
OrganizationIDs: []uuid.UUID{orgID},
|
||||
Username: triggerUsername,
|
||||
Email: triggerEmail,
|
||||
Password: "test-password-123",
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test template version", slog.Error(err))
|
||||
logger.Error(ctx, "create test user", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template version created", slog.F("template_version_id", version.ID))
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountCreated] <- time.Now()
|
||||
|
||||
// Create template.
|
||||
testTemplate, err := client.CreateTemplate(ctx, orgID, codersdk.CreateTemplateRequest{
|
||||
Name: "scaletest-test-template",
|
||||
Description: "scaletest-test-template",
|
||||
VersionID: version.ID,
|
||||
})
|
||||
err = client.DeleteUser(ctx, testUser.ID)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "create test template", slog.Error(err))
|
||||
logger.Error(ctx, "delete test user", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template created", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Delete template to trigger notification.
|
||||
err = client.DeleteTemplate(ctx, testTemplate.ID)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "delete test template", slog.Error(err))
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "test template deleted", slog.F("template_id", testTemplate.ID))
|
||||
|
||||
// Record expected notification.
|
||||
expectedNotifications[notificationsLib.TemplateTemplateDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateTemplateDeleted])
|
||||
expectedNotifications[notificationsLib.TemplateUserAccountDeleted] <- time.Now()
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountCreated])
|
||||
close(expectedNotifications[notificationsLib.TemplateUserAccountDeleted])
|
||||
}
|
||||
|
||||
+35
-18
@@ -56,14 +56,19 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
taskID := uuid.MustParse(id1)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/exists":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK,
|
||||
codersdk.Task{
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: taskID,
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
})
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id1:
|
||||
c.deleteCalls.Add(1)
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
@@ -102,21 +107,27 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
name: "Multiple_YesFlag",
|
||||
args: []string{"--yes", "first", id4},
|
||||
buildHandler: func(c *testCounters) http.HandlerFunc {
|
||||
firstID := uuid.MustParse(id3)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/first":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse(id3),
|
||||
Name: "first",
|
||||
OwnerName: "me",
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: firstID,
|
||||
Name: "first",
|
||||
OwnerName: "me",
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/"+id4:
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse(id4),
|
||||
OwnerName: "me",
|
||||
Name: "uuid-task-4",
|
||||
Name: "uuid-task-2",
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id3:
|
||||
c.deleteCalls.Add(1)
|
||||
@@ -130,7 +141,7 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
}
|
||||
},
|
||||
wantDeleteCalls: 2,
|
||||
wantNameResolves: 2,
|
||||
wantNameResolves: 1,
|
||||
wantDeletedMessage: 2,
|
||||
},
|
||||
{
|
||||
@@ -163,14 +174,20 @@ func TestExpTaskDelete(t *testing.T) {
|
||||
taskID := uuid.MustParse(id5)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks/me/bad":
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/experimental/tasks" && r.URL.Query().Get("q") == "owner:\"me\"":
|
||||
c.nameResolves.Add(1)
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, codersdk.Task{
|
||||
ID: taskID,
|
||||
Name: "bad",
|
||||
OwnerName: "me",
|
||||
httpapi.Write(r.Context(), w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: taskID,
|
||||
Name: "bad",
|
||||
OwnerName: "me",
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/bad":
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/experimental/tasks/me/"+id5:
|
||||
httpapi.InternalServerError(w, xerrors.New("boom"))
|
||||
default:
|
||||
httpapi.InternalServerError(w, xerrors.New("unwanted path: "+r.Method+" "+r.URL.Path))
|
||||
|
||||
@@ -2,6 +2,7 @@ package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"io"
|
||||
@@ -18,7 +19,10 @@ import (
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
@@ -39,22 +43,76 @@ func makeAITask(t *testing.T, db database.Store, orgID, adminID, ownerID uuid.UU
|
||||
},
|
||||
}).Do()
|
||||
|
||||
build := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
ws := database.WorkspaceTable{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: tv.Template.ID,
|
||||
}).
|
||||
}
|
||||
build := dbfake.WorkspaceBuild(t, db, ws).
|
||||
Seed(database.WorkspaceBuild{
|
||||
TemplateVersionID: tv.TemplateVersion.ID,
|
||||
Transition: transition,
|
||||
}).
|
||||
WithAgent().
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: prompt,
|
||||
}, nil).
|
||||
Do()
|
||||
}).WithAgent().Do()
|
||||
dbgen.WorkspaceBuildParameters(t, db, []database.WorkspaceBuildParameter{
|
||||
{
|
||||
WorkspaceBuildID: build.Build.ID,
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: prompt,
|
||||
},
|
||||
})
|
||||
agents, err := db.GetWorkspaceAgentsByWorkspaceAndBuildNumber(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.GetWorkspaceAgentsByWorkspaceAndBuildNumberParams{
|
||||
WorkspaceID: build.Workspace.ID,
|
||||
BuildNumber: build.Build.BuildNumber,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, agents)
|
||||
agentID := agents[0].ID
|
||||
|
||||
return build.Task
|
||||
// Create a workspace app and set it as the sidebar app.
|
||||
app := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
|
||||
AgentID: agentID,
|
||||
Slug: "task-sidebar",
|
||||
DisplayName: "Task Sidebar",
|
||||
External: false,
|
||||
})
|
||||
|
||||
// Update build flags to reference the sidebar app and HasAITask=true.
|
||||
err = db.UpdateWorkspaceBuildFlagsByID(
|
||||
dbauthz.AsSystemRestricted(context.Background()),
|
||||
database.UpdateWorkspaceBuildFlagsByIDParams{
|
||||
ID: build.Build.ID,
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
HasExternalAgent: sql.NullBool{Bool: false, Valid: false},
|
||||
SidebarAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
UpdatedAt: build.Build.UpdatedAt,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a task record in the tasks table for the new data model.
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: ownerID,
|
||||
Name: build.Workspace.Name,
|
||||
WorkspaceID: uuid.NullUUID{UUID: build.Workspace.ID, Valid: true},
|
||||
TemplateVersionID: tv.TemplateVersion.ID,
|
||||
TemplateParameters: []byte("{}"),
|
||||
Prompt: prompt,
|
||||
CreatedAt: dbtime.Now(),
|
||||
})
|
||||
|
||||
// Link the task to the workspace app.
|
||||
dbgen.TaskWorkspaceApp(t, db, database.TaskWorkspaceApp{
|
||||
TaskID: task.ID,
|
||||
WorkspaceBuildNumber: build.Build.BuildNumber,
|
||||
WorkspaceAgentID: uuid.NullUUID{UUID: agentID, Valid: true},
|
||||
WorkspaceAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
})
|
||||
|
||||
return task
|
||||
}
|
||||
|
||||
func TestExpTaskList(t *testing.T) {
|
||||
|
||||
+97
-30
@@ -36,9 +36,17 @@ func Test_TaskStatus(t *testing.T) {
|
||||
hf: func(ctx context.Context, _ time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/experimental/tasks/me/doesnotexist":
|
||||
httpapi.ResourceNotFound(w)
|
||||
return
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{},
|
||||
Count: 0,
|
||||
})
|
||||
return
|
||||
}
|
||||
default:
|
||||
t.Errorf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
@@ -52,7 +60,35 @@ func Test_TaskStatus(t *testing.T) {
|
||||
hf: func(ctx context.Context, now time.Time) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/experimental/tasks/me/exists":
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: now,
|
||||
Message: "Thinking furiously...",
|
||||
},
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
@@ -88,21 +124,30 @@ func Test_TaskStatus(t *testing.T) {
|
||||
var calls atomic.Int64
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/experimental/tasks/me/exists":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusPending,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-5 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusPending,
|
||||
})
|
||||
return
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
// Return initial task state for --watch test
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusPending,
|
||||
CreatedAt: now.Add(-5 * time.Second),
|
||||
UpdatedAt: now.Add(-5 * time.Second),
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusPending,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
defer calls.Add(1)
|
||||
switch calls.Load() {
|
||||
@@ -218,18 +263,40 @@ func Test_TaskStatus(t *testing.T) {
|
||||
ts := time.Date(2025, 8, 26, 12, 34, 56, 0, time.UTC)
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/experimental/tasks/me/exists":
|
||||
case "/api/experimental/tasks":
|
||||
if r.URL.Query().Get("q") == "owner:\"me\"" {
|
||||
httpapi.Write(ctx, w, http.StatusOK, struct {
|
||||
Tasks []codersdk.Task `json:"tasks"`
|
||||
Count int `json:"count"`
|
||||
}{
|
||||
Tasks: []codersdk.Task{{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: ts.Add(time.Second),
|
||||
Message: "Thinking furiously...",
|
||||
},
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
Status: codersdk.TaskStatusActive,
|
||||
}},
|
||||
Count: 1,
|
||||
})
|
||||
return
|
||||
}
|
||||
case "/api/experimental/tasks/me/11111111-1111-1111-1111-111111111111":
|
||||
httpapi.Write(ctx, w, http.StatusOK, codersdk.Task{
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
Name: "exists",
|
||||
OwnerName: "me",
|
||||
WorkspaceAgentHealth: &codersdk.WorkspaceAgentHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
WorkspaceAgentLifecycle: ptr.Ref(codersdk.WorkspaceAgentLifecycleReady),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
ID: uuid.MustParse("11111111-1111-1111-1111-111111111111"),
|
||||
WorkspaceStatus: codersdk.WorkspaceStatusRunning,
|
||||
CreatedAt: ts,
|
||||
UpdatedAt: ts,
|
||||
CurrentState: &codersdk.TaskStateEntry{
|
||||
State: codersdk.TaskStateWorking,
|
||||
Timestamp: ts.Add(time.Second),
|
||||
|
||||
+15
-10
@@ -53,6 +53,7 @@ func Test_Tasks(t *testing.T) {
|
||||
taskName = strings.ReplaceAll(testutil.GetRandomName(t), "_", "-")
|
||||
)
|
||||
|
||||
//nolint:paralleltest // The sub-tests of this test must be run sequentially.
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
cmdArgs []string
|
||||
@@ -134,15 +135,16 @@ func Test_Tasks(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Logf("test case: %q", tc.name)
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, tc.cmdArgs...)
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
require.NoError(t, inv.WithContext(ctx).Run(), tc.name)
|
||||
if tc.assertFn != nil {
|
||||
tc.assertFn(stdout.String(), userClient)
|
||||
}
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, tc.cmdArgs...)
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
require.NoError(t, inv.WithContext(ctx).Run())
|
||||
if tc.assertFn != nil {
|
||||
tc.assertFn(stdout.String(), userClient)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -291,6 +293,7 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
},
|
||||
},
|
||||
@@ -325,7 +328,9 @@ func createAITaskTemplate(t *testing.T, client *codersdk.Client, orgID uuid.UUID
|
||||
},
|
||||
AiTasks: []*proto.AITask{
|
||||
{
|
||||
AppId: taskAppID.String(),
|
||||
SidebarApp: &proto.AITaskSidebarApp{
|
||||
Id: taskAppID.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1,355 +0,0 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
)
|
||||
|
||||
// mockKeyring is a mock sessionstore.Backend implementation.
|
||||
type mockKeyring struct {
|
||||
credentials map[string]string // service name -> credential
|
||||
}
|
||||
|
||||
const mockServiceName = "mock-service-name"
|
||||
|
||||
func newMockKeyring() *mockKeyring {
|
||||
return &mockKeyring{credentials: make(map[string]string)}
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Read(_ *url.URL) (string, error) {
|
||||
cred, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Write(_ *url.URL, token string) error {
|
||||
m.credentials[mockServiceName] = token
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockKeyring) Delete(_ *url.URL) error {
|
||||
_, ok := m.credentials[mockServiceName]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
delete(m.credentials, mockServiceName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestUseKeyring(t *testing.T) {
|
||||
// Verify that the --use-keyring flag opts into using a keyring backend for
|
||||
// storing session tokens instead of plain text files.
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Login", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Create CLI invocation with --use-keyring flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend before running the command
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
// Run login in background
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Provide the token when prompted
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring instead)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring")
|
||||
|
||||
// Verify that the credential IS stored in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in mock keyring")
|
||||
require.Equal(t, client.SessionToken(), cred, "stored token should match login token")
|
||||
})
|
||||
|
||||
t.Run("Logout", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First, login with --use-keyring
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--use-keyring",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
// Inject the mock backend
|
||||
var loginRoot cli.RootCmd
|
||||
loginCmd, err := loginRoot.Command(loginRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
loginRoot.WithSessionStorageBackend(mockBackend)
|
||||
loginInv.Command = loginCmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify credential exists in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "read credential should succeed before logout")
|
||||
require.NotEmpty(t, cred, "credential should exist after logout")
|
||||
|
||||
// Now run logout with --use-keyring
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
// Inject the same mock backend
|
||||
var logoutRoot cli.RootCmd
|
||||
logoutCmd, err := logoutRoot.Command(logoutRoot.AGPL())
|
||||
require.NoError(t, err)
|
||||
logoutRoot.WithSessionStorageBackend(mockBackend)
|
||||
logoutInv.Command = logoutCmd
|
||||
|
||||
var logoutOut bytes.Buffer
|
||||
logoutInv.Stdout = &logoutOut
|
||||
|
||||
err = logoutInv.Run()
|
||||
require.NoError(t, err, "logout should succeed")
|
||||
|
||||
// Verify the credential was deleted from mock keyring
|
||||
_, err = mockBackend.Read(nil)
|
||||
require.ErrorIs(t, err, os.ErrNotExist, "credential should be deleted from keyring after logout")
|
||||
})
|
||||
|
||||
t.Run("OmitFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// --use-keyring flag omitted (should use file-based storage)
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file WAS created (not using keyring)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err := os.Stat(sessionFile)
|
||||
require.NoError(t, err, "session file should exist when NOT using --use-keyring")
|
||||
|
||||
// Read and verify the token from file
|
||||
content, err := os.ReadFile(sessionFile)
|
||||
require.NoError(t, err, "should be able to read session file")
|
||||
require.Equal(t, client.SessionToken(), string(content), "file should contain the session token")
|
||||
})
|
||||
|
||||
t.Run("EnvironmentVariable", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test server
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a pty for interactive prompts
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// Login using CODER_USE_KEYRING environment variable instead of flag
|
||||
inv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Environ.Set("CODER_USE_KEYRING", "true")
|
||||
|
||||
// Inject the mock backend
|
||||
var root cli.RootCmd
|
||||
cmd, err := root.Command(root.AGPL())
|
||||
require.NoError(t, err)
|
||||
mockBackend := newMockKeyring()
|
||||
root.WithSessionStorageBackend(mockBackend)
|
||||
inv.Command = cmd
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Verify that session file was NOT created (using keyring via env var)
|
||||
sessionFile := path.Join(string(cfg), "session")
|
||||
_, err = os.Stat(sessionFile)
|
||||
require.True(t, os.IsNotExist(err), "session file should not exist when using keyring via env var")
|
||||
|
||||
// Verify credential is in mock keyring
|
||||
cred, err := mockBackend.Read(nil)
|
||||
require.NoError(t, err, "credential should be stored in keyring when CODER_USE_KEYRING=true")
|
||||
require.NotEmpty(t, cred)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUseKeyringUnsupportedOS(t *testing.T) {
|
||||
// Verify that trying to use --use-keyring on an unsupported operating system produces
|
||||
// a helpful error message.
|
||||
t.Parallel()
|
||||
|
||||
// Skip on Windows since the keyring is actually supported.
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Skipping unsupported OS test on Windows where keyring is supported")
|
||||
}
|
||||
|
||||
const expMessage = "keyring storage is not supported on this operating system; remove the --use-keyring flag"
|
||||
|
||||
t.Run("LoginWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Try to login with --use-keyring on an unsupported OS
|
||||
inv, _ := clitest.New(t,
|
||||
"login",
|
||||
"--use-keyring",
|
||||
client.URL.String(),
|
||||
)
|
||||
|
||||
// The error should occur immediately, before any prompts
|
||||
loginErr := inv.Run()
|
||||
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, loginErr)
|
||||
require.Contains(t, loginErr.Error(), expMessage)
|
||||
})
|
||||
|
||||
t.Run("LogoutWithUnsupportedKeyring", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
pty := ptytest.New(t)
|
||||
|
||||
// First login without keyring to create a session
|
||||
loginInv, cfg := clitest.New(t,
|
||||
"login",
|
||||
"--force-tty",
|
||||
"--no-open",
|
||||
client.URL.String(),
|
||||
)
|
||||
loginInv.Stdin = pty.Input()
|
||||
loginInv.Stdout = pty.Output()
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := loginInv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Paste your token here:")
|
||||
pty.WriteLine(client.SessionToken())
|
||||
pty.ExpectMatch("Welcome to Coder")
|
||||
<-doneChan
|
||||
|
||||
// Now try to logout with --use-keyring on an unsupported OS
|
||||
logoutInv, _ := clitest.New(t,
|
||||
"logout",
|
||||
"--use-keyring",
|
||||
"--yes",
|
||||
"--global-config", string(cfg),
|
||||
)
|
||||
|
||||
err := logoutInv.Run()
|
||||
// Verify we got an error about unsupported OS
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), expMessage)
|
||||
})
|
||||
}
|
||||
+5
-24
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/coderd/userpassword"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
@@ -115,11 +114,9 @@ func (r *RootCmd) loginWithPassword(
|
||||
}
|
||||
|
||||
sessionToken := resp.SessionToken
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
config := r.createConfig()
|
||||
err = config.Session().Write(sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
|
||||
@@ -152,15 +149,11 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
useTokenForSession bool
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Long: "By default, the session token is stored in a plain text file. Use the " +
|
||||
"--use-keyring flag or set CODER_USE_KEYRING=true to store the token in " +
|
||||
"the operating system keyring instead.",
|
||||
Use: "login [<url>]",
|
||||
Short: "Authenticate with Coder deployment",
|
||||
Middleware: serpent.RequireRangeArgs(0, 1),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
rawURL := ""
|
||||
var urlSource string
|
||||
|
||||
@@ -205,15 +198,6 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check keyring availability before prompting the user for a token to fail fast.
|
||||
if r.useKeyring {
|
||||
backend := r.ensureTokenBackend()
|
||||
_, err := backend.Read(client.URL)
|
||||
if err != nil && xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
}
|
||||
|
||||
hasFirstUser, err := client.HasFirstUser(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to check server %q for first user, is the URL correct and is coder accessible from your browser? Error - has initial user: %w", serverURL.String(), err)
|
||||
@@ -410,11 +394,8 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
}
|
||||
|
||||
config := r.createConfig()
|
||||
err = r.ensureTokenBackend().Write(client.URL, sessionToken)
|
||||
err = config.Session().Write(sessionToken)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("write session token: %w", err)
|
||||
}
|
||||
err = config.URL().Write(serverURL.String())
|
||||
|
||||
+3
-8
@@ -8,7 +8,6 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
@@ -47,15 +46,11 @@ func (r *RootCmd) logout() *serpent.Command {
|
||||
errors = append(errors, xerrors.Errorf("remove URL file: %w", err))
|
||||
}
|
||||
|
||||
err = r.ensureTokenBackend().Delete(client.URL)
|
||||
err = config.Session().Delete()
|
||||
// Only throw error if the session configuration file is present,
|
||||
// otherwise the user is already logged out, and we proceed
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
errors = append(errors, errKeyringNotSupported)
|
||||
} else {
|
||||
errors = append(errors, xerrors.Errorf("remove session token: %w", err))
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, xerrors.Errorf("remove session file: %w", err))
|
||||
}
|
||||
|
||||
err = config.Organization().Delete()
|
||||
|
||||
+7
-50
@@ -37,7 +37,6 @@ import (
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/gitauth"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
"github.com/coder/coder/v2/cli/telemetry"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -55,8 +54,6 @@ var (
|
||||
// ErrSilent is a sentinel error that tells the command handler to just exit with a non-zero error, but not print
|
||||
// anything.
|
||||
ErrSilent = xerrors.New("silent error")
|
||||
|
||||
errKeyringNotSupported = xerrors.New("keyring storage is not supported on this operating system; remove the --use-keyring flag to use file-based storage")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -71,14 +68,12 @@ const (
|
||||
varVerbose = "verbose"
|
||||
varDisableDirect = "disable-direct-connections"
|
||||
varDisableNetworkTelemetry = "disable-network-telemetry"
|
||||
varUseKeyring = "use-keyring"
|
||||
|
||||
notLoggedInMessage = "You are not logged in. Try logging in using '%s login <url>'."
|
||||
|
||||
envNoVersionCheck = "CODER_NO_VERSION_WARNING"
|
||||
envNoFeatureWarning = "CODER_NO_FEATURE_WARNING"
|
||||
envSessionToken = "CODER_SESSION_TOKEN"
|
||||
envUseKeyring = "CODER_USE_KEYRING"
|
||||
//nolint:gosec
|
||||
envAgentToken = "CODER_AGENT_TOKEN"
|
||||
//nolint:gosec
|
||||
@@ -479,15 +474,6 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
Value: serpent.BoolOf(&r.disableNetworkTelemetry),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: varUseKeyring,
|
||||
Env: envUseKeyring,
|
||||
Description: "Store and retrieve session tokens using the operating system " +
|
||||
"keyring. Currently only supported on Windows. By default, tokens are " +
|
||||
"stored in plain text files.",
|
||||
Value: serpent.BoolOf(&r.useKeyring),
|
||||
Group: globalGroup,
|
||||
},
|
||||
{
|
||||
Flag: "debug-http",
|
||||
Description: "Debug codersdk HTTP requests.",
|
||||
@@ -522,7 +508,6 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
type RootCmd struct {
|
||||
clientURL *url.URL
|
||||
token string
|
||||
tokenBackend sessionstore.Backend
|
||||
globalConfig string
|
||||
header []string
|
||||
headerCommand string
|
||||
@@ -537,7 +522,6 @@ type RootCmd struct {
|
||||
disableNetworkTelemetry bool
|
||||
noVersionCheck bool
|
||||
noFeatureWarning bool
|
||||
useKeyring bool
|
||||
}
|
||||
|
||||
// InitClient creates and configures a new client with authentication, telemetry,
|
||||
@@ -565,19 +549,14 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
r.token, err = conf.Session().Read()
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Configure HTTP client with transport wrappers
|
||||
@@ -609,6 +588,7 @@ func (r *RootCmd) InitClient(inv *serpent.Invocation) (*codersdk.Client, error)
|
||||
// This allows commands to run without requiring authentication, but still use auth if available.
|
||||
func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, error) {
|
||||
conf := r.createConfig()
|
||||
var err error
|
||||
// Read the client URL stored on disk.
|
||||
if r.clientURL == nil || r.clientURL.String() == "" {
|
||||
rawURL, err := conf.URL().Read()
|
||||
@@ -625,19 +605,14 @@ func (r *RootCmd) TryInitClient(inv *serpent.Invocation) (*codersdk.Client, erro
|
||||
}
|
||||
}
|
||||
}
|
||||
// Read the token stored on disk.
|
||||
if r.token == "" {
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
r.token, err = conf.Session().Read()
|
||||
// Even if there isn't a token, we don't care.
|
||||
// Some API routes can be unauthenticated.
|
||||
if err != nil && !xerrors.Is(err, os.ErrNotExist) {
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return nil, errKeyringNotSupported
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
if tok != "" {
|
||||
r.token = tok
|
||||
}
|
||||
}
|
||||
|
||||
// Only configure the client if we have a URL
|
||||
@@ -713,24 +688,6 @@ func (r *RootCmd) createUnauthenticatedClient(ctx context.Context, serverURL *ur
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// ensureTokenBackend returns the session token storage backend, creating it if necessary.
|
||||
// This must be called after flags are parsed so we can respect the value of the --use-keyring
|
||||
// flag.
|
||||
func (r *RootCmd) ensureTokenBackend() sessionstore.Backend {
|
||||
if r.tokenBackend == nil {
|
||||
if r.useKeyring {
|
||||
r.tokenBackend = sessionstore.NewKeyring()
|
||||
} else {
|
||||
r.tokenBackend = sessionstore.NewFile(r.createConfig)
|
||||
}
|
||||
}
|
||||
return r.tokenBackend
|
||||
}
|
||||
|
||||
func (r *RootCmd) WithSessionStorageBackend(backend sessionstore.Backend) {
|
||||
r.tokenBackend = backend
|
||||
}
|
||||
|
||||
type AgentAuth struct {
|
||||
// Agent Client config
|
||||
agentToken string
|
||||
|
||||
@@ -1,239 +0,0 @@
|
||||
// Package sessionstore provides CLI session token storage mechanisms.
|
||||
// Operating system keyring storage is intended to have compatibility with other Coder
|
||||
// applications (e.g. Coder Desktop, Coder provider for JetBrains Toolbox, etc) so that
|
||||
// applications can read/write the same credential stored in the keyring.
|
||||
//
|
||||
// Note that we aren't using an existing Go package zalando/go-keyring here for a few
|
||||
// reasons. 1) It prescribes the format of the target credential name in the OS keyrings,
|
||||
// which makes our life difficult for compatibility with other Coder applications. 2)
|
||||
// It uses init functions that make it difficult to test with. As a result, the OS
|
||||
// keyring implementations may be adapted from zalando/go-keyring source (i.e. Windows).
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
)
|
||||
|
||||
// Backend is a storage backend for session tokens.
|
||||
type Backend interface {
|
||||
// Read returns the session token for the given server URL or an error, if any. It
|
||||
// will return os.ErrNotExist if no token exists for the given URL.
|
||||
Read(serverURL *url.URL) (string, error)
|
||||
// Write stores the session token for the given server URL.
|
||||
Write(serverURL *url.URL, token string) error
|
||||
// Delete removes the session token for the given server URL or an error, if any.
|
||||
// It will return os.ErrNotExist error if no token exists to delete.
|
||||
Delete(serverURL *url.URL) error
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
// ErrSetDataTooBig is returned if `keyringProvider.Set` was called with too much data.
|
||||
// On macOS: The combination of service, username & password should not exceed ~3000 bytes
|
||||
// On Windows: The service is limited to 32KiB while the password is limited to 2560 bytes
|
||||
ErrSetDataTooBig = xerrors.New("data passed to Set was too big")
|
||||
|
||||
// ErrNotImplemented represents when keyring usage is not implemented on the current
|
||||
// operating system.
|
||||
ErrNotImplemented = xerrors.New("not implemented")
|
||||
)
|
||||
|
||||
// keyringProvider represents an operating system keyring. The expectation
|
||||
// is these methods operate on the user/login keyring.
|
||||
type keyringProvider interface {
|
||||
// Set stores the given credential for a service name in the operating system
|
||||
// keyring.
|
||||
Set(service, credential string) error
|
||||
// Get retrieves the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Get(service string) ([]byte, error)
|
||||
// Delete deletes the credential from the keyring. It must return os.ErrNotExist
|
||||
// if the credential is not found.
|
||||
Delete(service string) error
|
||||
}
|
||||
|
||||
// credential represents a single credential entry.
|
||||
type credential struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
|
||||
// credentialsMap represents the JSON structure stored in the operating system keyring.
|
||||
// It supports storing multiple credentials for different server URLs.
|
||||
type credentialsMap map[string]credential
|
||||
|
||||
// normalizeHost returns a normalized version of the URL host for use as a map key.
|
||||
func normalizeHost(u *url.URL) (string, error) {
|
||||
if u == nil || u.Host == "" {
|
||||
return "", xerrors.New("nil server URL")
|
||||
}
|
||||
return strings.TrimSpace(strings.ToLower(u.Host)), nil
|
||||
}
|
||||
|
||||
// parseCredentialsJSON parses the JSON from the keyring into a credentialsMap.
|
||||
func parseCredentialsJSON(jsonData []byte) (credentialsMap, error) {
|
||||
if len(jsonData) == 0 {
|
||||
return make(credentialsMap), nil
|
||||
}
|
||||
|
||||
var creds credentialsMap
|
||||
if err := json.Unmarshal(jsonData, &creds); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// Keyring is a Backend that exclusively stores the session token in the operating
|
||||
// system keyring. Happy path usage of this type should start with NewKeyring.
|
||||
// It stores a JSON object in the keyring that supports multiple credentials for
|
||||
// different server URLs, providing compatibility with Coder Desktop and other Coder
|
||||
// applications.
|
||||
type Keyring struct {
|
||||
provider keyringProvider
|
||||
serviceName string
|
||||
}
|
||||
|
||||
// NewKeyring creates a Keyring with the default service name for production use.
|
||||
func NewKeyring() Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: defaultServiceName,
|
||||
}
|
||||
}
|
||||
|
||||
// NewKeyringWithService creates a Keyring Backend that stores credentials under the
|
||||
// specified service name. This is primarily intended for testing to avoid conflicts
|
||||
// with production credentials and collisions between tests.
|
||||
func NewKeyringWithService(serviceName string) Keyring {
|
||||
return Keyring{
|
||||
provider: operatingSystemKeyring{},
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (o Keyring) Read(serverURL *url.URL) (string, error) {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
credJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(credJSON) == 0 {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(credJSON)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("read: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Return the credential for the specified URL
|
||||
cred, ok := creds[host]
|
||||
if !ok {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
return cred.APIToken, nil
|
||||
}
|
||||
|
||||
func (o Keyring) Write(serverURL *url.URL, token string) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return xerrors.Errorf("read existing credentials: %w", err)
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write: parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
// Upsert the credential for this URL.
|
||||
creds[host] = credential{
|
||||
CoderURL: host,
|
||||
APIToken: token,
|
||||
}
|
||||
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
err = o.provider.Set(o.serviceName, string(credsJSON))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write credentials to keyring: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o Keyring) Delete(serverURL *url.URL) error {
|
||||
host, err := normalizeHost(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existingJSON, err := o.provider.Get(o.serviceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
creds, err := parseCredentialsJSON(existingJSON)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse existing credentials: %w", err)
|
||||
}
|
||||
|
||||
if _, ok := creds[host]; !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
delete(creds, host)
|
||||
|
||||
// Delete the entire keyring entry when no credentials remain.
|
||||
if len(creds) == 0 {
|
||||
return o.provider.Delete(o.serviceName)
|
||||
}
|
||||
|
||||
// Write back the updated credentials map.
|
||||
credsJSON, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to marshal credentials: %w", err)
|
||||
}
|
||||
|
||||
return o.provider.Set(o.serviceName, string(credsJSON))
|
||||
}
|
||||
|
||||
// File is a Backend that exclusively stores the session token in a file on disk.
|
||||
type File struct {
|
||||
config func() config.Root
|
||||
}
|
||||
|
||||
func NewFile(f func() config.Root) *File {
|
||||
return &File{config: f}
|
||||
}
|
||||
|
||||
func (f *File) Read(_ *url.URL) (string, error) {
|
||||
return f.config().Session().Read()
|
||||
}
|
||||
|
||||
func (f *File) Write(_ *url.URL, token string) error {
|
||||
return f.config().Session().Write(token)
|
||||
}
|
||||
|
||||
func (f *File) Delete(_ *url.URL) error {
|
||||
return f.config().Session().Delete()
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNormalizeHost(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
url *url.URL
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "StandardHost",
|
||||
url: &url.URL{Host: "coder.example.com"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithPort",
|
||||
url: &url.URL{Host: "coder.example.com:8080"},
|
||||
want: "coder.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "UppercaseHost",
|
||||
url: &url.URL{Host: "CODER.EXAMPLE.COM"},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "HostWithWhitespace",
|
||||
url: &url.URL{Host: " coder.example.com "},
|
||||
want: "coder.example.com",
|
||||
},
|
||||
{
|
||||
name: "NilURL",
|
||||
url: nil,
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "EmptyHost",
|
||||
url: &url.URL{Host: ""},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := normalizeHost(tt.url)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCredentialsJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Empty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
creds, err := parseCredentialsJSON(nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, creds)
|
||||
require.Empty(t, creds)
|
||||
})
|
||||
|
||||
t.Run("NewFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{
|
||||
"coder1.example.com": {"coder_url": "coder1.example.com", "api_token": "token1"},
|
||||
"coder2.example.com": {"coder_url": "coder2.example.com", "api_token": "token2"}
|
||||
}`)
|
||||
creds, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, creds, 2)
|
||||
require.Equal(t, "token1", creds["coder1.example.com"].APIToken)
|
||||
require.Equal(t, "token2", creds["coder2.example.com"].APIToken)
|
||||
})
|
||||
|
||||
t.Run("InvalidJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
jsonData := []byte(`{invalid json}`)
|
||||
_, err := parseCredentialsJSON(jsonData)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCredentialsMap_RoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
creds := credentialsMap{
|
||||
"coder1.example.com": {
|
||||
CoderURL: "coder1.example.com",
|
||||
APIToken: "token1",
|
||||
},
|
||||
"coder2.example.com:8080": {
|
||||
CoderURL: "coder2.example.com:8080",
|
||||
APIToken: "token2",
|
||||
},
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
parsed, err := parseCredentialsJSON(jsonData)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, creds, parsed)
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
const defaultServiceName = "not-implemented"
|
||||
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(_, _ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(_ string) ([]byte, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(_ string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
@@ -1,342 +0,0 @@
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
// Generate a test service name for use with the OS keyring. It uses a combination
|
||||
// of the test name and a nanosecond timestamp to prevent collisions.
|
||||
func keyringTestServiceName(t *testing.T) string {
|
||||
t.Helper()
|
||||
return t.Name() + "_" + fmt.Sprintf("%v", time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func TestKeyring(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("linux and darwin are not supported yet")
|
||||
}
|
||||
|
||||
// This test exercises use of the operating system keyring. As a result,
|
||||
// the operating system keyring is expected to be available.
|
||||
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist when reading non-existent token")
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected os.ErrNotExist when deleting non-existent token")
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
dir := t.TempDir()
|
||||
expSessionFile := path.Join(dir, "session")
|
||||
|
||||
const inputToken = "test-keyring-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify no session file was created (keyring stores in OS keyring, not file)
|
||||
_, err = os.Stat(expSessionFile)
|
||||
require.True(t, errors.Is(err, os.ErrNotExist), "expected session token file to not exist when using keyring")
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
const inputToken = "test-keyring-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err), "expected os.ErrNotExist after deleting token")
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-keyring-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-keyring-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
|
||||
// Clean up
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("MultipleServers", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
backend := sessionstore.NewKeyringWithService(keyringTestServiceName(t))
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srvURL)
|
||||
_ = backend.Delete(srvURL2)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-for-server-1"
|
||||
err = backend.Write(srvURL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1)
|
||||
const token2 = "token-for-server-2"
|
||||
err = backend.Write(srvURL2, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read server 1's credential
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
// Read server 2's credential
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Delete server 1's credential
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify server 1's credential is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// Verify server 2's credential still exists
|
||||
token, err = backend.Read(srvURL2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Clean up remaining credentials
|
||||
err = backend.Delete(srvURL2)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFile(t *testing.T) {
|
||||
const (
|
||||
testURL = "http://127.0.0.1:1337"
|
||||
testURL2 = "http://127.0.0.1:1338"
|
||||
)
|
||||
|
||||
t.Parallel()
|
||||
|
||||
t.Run("ReadNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("WriteAndRead", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the session file was created
|
||||
sessionFile := config.Root(dir).Session()
|
||||
require.True(t, sessionFile.Exists())
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteAndDelete", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-67890"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token was written
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token is gone
|
||||
_, err = backend.Read(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("DeleteNonExistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Attempt to delete a non-existent token
|
||||
err = backend.Delete(srvURL)
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
})
|
||||
|
||||
t.Run("OverwriteToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write first token
|
||||
const firstToken = "first-token"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstToken, token)
|
||||
|
||||
// Overwrite with second token
|
||||
const secondToken = "second-token"
|
||||
err = backend.Write(srvURL, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err = backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
|
||||
t.Run("WriteIgnoresURL", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir := t.TempDir()
|
||||
backend := sessionstore.NewFile(func() config.Root { return config.Root(dir) })
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
srvURL2, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with first URL test token
|
||||
const firstToken = "token-for-url1"
|
||||
err = backend.Write(srvURL, firstToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:gosec // Write with second URL - should overwrite
|
||||
const secondToken = "token-for-url2"
|
||||
err = backend.Write(srvURL2, secondToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have the second token (File backend doesn't differentiate by URL)
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, secondToken, token)
|
||||
})
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultServiceName is the service name used in the Windows Credential Manager
|
||||
// for storing Coder CLI session tokens.
|
||||
defaultServiceName = "coder-v2-credentials"
|
||||
)
|
||||
|
||||
// operatingSystemKeyring implements keyringProvider and uses Windows Credential Manager.
|
||||
// It is largely adapted from the zalando/go-keyring package.
|
||||
type operatingSystemKeyring struct{}
|
||||
|
||||
func (operatingSystemKeyring) Set(service, credential string) error {
|
||||
// password may not exceed 2560 bytes (https://github.com/jaraco/keyring/issues/540#issuecomment-968329967)
|
||||
if len(credential) > 2560 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 512 bytes (might need more testing)
|
||||
if len(service) >= 512 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
// service may not exceed 32k but problems occur before that
|
||||
// so we limit it to 30k
|
||||
if len(service) > 1024*30 {
|
||||
return ErrSetDataTooBig
|
||||
}
|
||||
|
||||
cred := wincred.NewGenericCredential(service)
|
||||
cred.CredentialBlob = []byte(credential)
|
||||
cred.Persist = wincred.PersistLocalMachine
|
||||
return cred.Write()
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Get(service string) ([]byte, error) {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return cred.CredentialBlob, nil
|
||||
}
|
||||
|
||||
func (operatingSystemKeyring) Delete(service string) error {
|
||||
cred, err := wincred.GetGenericCredential(service)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.ERROR_NOT_FOUND) {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
return err
|
||||
}
|
||||
return cred.Delete()
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package sessionstore_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/danieljoos/wincred"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/sessionstore"
|
||||
)
|
||||
|
||||
func TestWindowsKeyring_WriteReadDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL = "http://127.0.0.1:1337"
|
||||
srvURL, err := url.Parse(testURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() { _ = backend.Delete(srvURL) })
|
||||
|
||||
// Verify no token exists initially
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
|
||||
// Write a token
|
||||
const inputToken = "test-token-12345"
|
||||
err = backend.Write(srvURL, inputToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the credential is stored in Windows Credential Manager with correct format
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Verify the stored values
|
||||
require.Len(t, storedCreds, 1)
|
||||
cred, ok := storedCreds[srvURL.Host]
|
||||
require.True(t, ok, "credential for URL should exist")
|
||||
require.Equal(t, inputToken, cred.APIToken)
|
||||
require.Equal(t, srvURL.Host, cred.CoderURL)
|
||||
|
||||
// Read the token back
|
||||
token, err := backend.Read(srvURL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputToken, token)
|
||||
|
||||
// Delete the token
|
||||
err = backend.Delete(srvURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify token is deleted
|
||||
_, err = backend.Read(srvURL)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestWindowsKeyring_MultipleServers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testURL1 = "http://127.0.0.1:1337"
|
||||
srv1URL, err := url.Parse(testURL1)
|
||||
require.NoError(t, err)
|
||||
|
||||
const testURL2 = "http://127.0.0.1:1338"
|
||||
srv2URL, err := url.Parse(testURL2)
|
||||
require.NoError(t, err)
|
||||
|
||||
serviceName := keyringTestServiceName(t)
|
||||
backend := sessionstore.NewKeyringWithService(serviceName)
|
||||
t.Cleanup(func() {
|
||||
_ = backend.Delete(srv1URL)
|
||||
_ = backend.Delete(srv2URL)
|
||||
})
|
||||
|
||||
// Write token for server 1
|
||||
const token1 = "token-server-1"
|
||||
err = backend.Write(srv1URL, token1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write token for server 2 (should NOT overwrite server 1's token)
|
||||
const token2 = "token-server-2"
|
||||
err = backend.Write(srv2URL, token2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify both credentials are stored in Windows Credential Manager
|
||||
winCred, err := wincred.GetGenericCredential(serviceName)
|
||||
require.NoError(t, err, "getting windows credential")
|
||||
|
||||
var storedCreds map[string]struct {
|
||||
CoderURL string `json:"coder_url"`
|
||||
APIToken string `json:"api_token"`
|
||||
}
|
||||
err = json.Unmarshal(winCred.CredentialBlob, &storedCreds)
|
||||
require.NoError(t, err, "unmarshalling stored credentials")
|
||||
|
||||
// Both credentials should exist
|
||||
require.Len(t, storedCreds, 2)
|
||||
require.Equal(t, token1, storedCreds[srv1URL.Host].APIToken)
|
||||
require.Equal(t, token2, storedCreds[srv2URL.Host].APIToken)
|
||||
|
||||
// Read individual credentials
|
||||
token, err := backend.Read(srv1URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token1, token)
|
||||
|
||||
token, err = backend.Read(srv2URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, token2, token)
|
||||
|
||||
// Cleanup
|
||||
err = backend.Delete(srv1URL)
|
||||
require.NoError(t, err)
|
||||
err = backend.Delete(srv2URL)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
Vendored
-5
@@ -108,11 +108,6 @@ variables or flags.
|
||||
--url url, $CODER_URL
|
||||
URL to a deployment.
|
||||
|
||||
--use-keyring bool, $CODER_USE_KEYRING
|
||||
Store and retrieve session tokens using the operating system keyring.
|
||||
Currently only supported on Windows. By default, tokens are stored in
|
||||
plain text files.
|
||||
|
||||
-v, --verbose bool, $CODER_VERBOSE
|
||||
Enable verbose output.
|
||||
|
||||
|
||||
-4
@@ -5,10 +5,6 @@ USAGE:
|
||||
|
||||
Authenticate with Coder deployment
|
||||
|
||||
By default, the session token is stored in a plain text file. Use the
|
||||
--use-keyring flag or set CODER_USE_KEYRING=true to store the token in the
|
||||
operating system keyring instead.
|
||||
|
||||
OPTIONS:
|
||||
--first-user-email string, $CODER_FIRST_USER_EMAIL
|
||||
Specifies an email address to use if creating the first user for the
|
||||
|
||||
-5
@@ -81,11 +81,6 @@ OPTIONS:
|
||||
check is performed once per day.
|
||||
|
||||
AIBRIDGE OPTIONS:
|
||||
--aibridge-inject-coder-mcp-tools bool, $CODER_AIBRIDGE_INJECT_CODER_MCP_TOOLS (default: false)
|
||||
Whether to inject Coder's MCP tools into intercepted AI Bridge
|
||||
requests (requires the "oauth2" and "mcp-server-http" experiments to
|
||||
be enabled).
|
||||
|
||||
--aibridge-anthropic-base-url string, $CODER_AIBRIDGE_ANTHROPIC_BASE_URL (default: https://api.anthropic.com/)
|
||||
The base URL of the Anthropic API.
|
||||
|
||||
|
||||
-4
@@ -747,7 +747,3 @@ aibridge:
|
||||
# https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
|
||||
# (default: global.anthropic.claude-haiku-4-5-20251001-v1:0, type: string)
|
||||
bedrock_small_fast_model: global.anthropic.claude-haiku-4-5-20251001-v1:0
|
||||
# Whether to inject Coder's MCP tools into intercepted AI Bridge requests
|
||||
# (requires the "oauth2" and "mcp-server-http" experiments to be enabled).
|
||||
# (default: false, type: bool)
|
||||
inject_coder_mcp_tools: false
|
||||
|
||||
+55
-22
@@ -7,6 +7,7 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -23,12 +24,62 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/searchquery"
|
||||
"github.com/coder/coder/v2/coderd/taskname"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
|
||||
aiagentapi "github.com/coder/agentapi-sdk-go"
|
||||
)
|
||||
|
||||
// This endpoint is experimental and not guaranteed to be stable, so we're not
|
||||
// generating public-facing documentation for it.
|
||||
func (api *API) aiTasksPrompts(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
buildIDsParam := r.URL.Query().Get("build_ids")
|
||||
if buildIDsParam == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "build_ids query parameter is required",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Parse build IDs
|
||||
buildIDStrings := strings.Split(buildIDsParam, ",")
|
||||
buildIDs := make([]uuid.UUID, 0, len(buildIDStrings))
|
||||
for _, idStr := range buildIDStrings {
|
||||
id, err := uuid.Parse(strings.TrimSpace(idStr))
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: fmt.Sprintf("Invalid build ID format: %s", idStr),
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
buildIDs = append(buildIDs, id)
|
||||
}
|
||||
|
||||
parameters, err := api.Database.GetWorkspaceBuildParametersByBuildIDs(ctx, buildIDs)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching workspace build parameters.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
promptsByBuildID := make(map[string]string, len(parameters))
|
||||
for _, param := range parameters {
|
||||
if param.Name != codersdk.AITaskPromptParameterName {
|
||||
continue
|
||||
}
|
||||
buildID := param.WorkspaceBuildID.String()
|
||||
promptsByBuildID[buildID] = param.Value
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.AITasksPromptsResponse{
|
||||
Prompts: promptsByBuildID,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Create a new AI task
|
||||
// @Description: EXPERIMENTAL: this endpoint is experimental and not guaranteed to be stable.
|
||||
// @ID create-task
|
||||
@@ -123,31 +174,13 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the template defines the AI Prompt parameter.
|
||||
templateParams, err := api.Database.GetTemplateVersionParameters(ctx, req.TemplateVersionID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching template parameters.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
var richParams []codersdk.WorkspaceBuildParameter
|
||||
if _, hasAIPromptParam := slice.Find(templateParams, func(param database.TemplateVersionParameter) bool {
|
||||
return param.Name == codersdk.AITaskPromptParameterName
|
||||
}); hasAIPromptParam {
|
||||
// Only add the AI Prompt parameter if the template defines it.
|
||||
richParams = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: req.Input},
|
||||
}
|
||||
}
|
||||
|
||||
createReq := codersdk.CreateWorkspaceRequest{
|
||||
Name: taskName,
|
||||
TemplateVersionID: req.TemplateVersionID,
|
||||
TemplateVersionPresetID: req.TemplateVersionPresetID,
|
||||
RichParameterValues: richParams,
|
||||
RichParameterValues: []codersdk.WorkspaceBuildParameter{
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: req.Input},
|
||||
},
|
||||
}
|
||||
|
||||
var owner workspaceOwner
|
||||
|
||||
+168
-161
@@ -7,8 +7,10 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -33,6 +35,128 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestAITasksPrompts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("EmptyBuildIDs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
experimentalClient := codersdk.NewExperimentalClient(client)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Test with empty build IDs
|
||||
prompts, err := experimentalClient.AITaskPrompts(ctx, []uuid.UUID{})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, prompts.Prompts)
|
||||
})
|
||||
|
||||
t.Run("MultipleBuilds", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
adminClient := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
first := coderdtest.CreateFirstUser(t, adminClient)
|
||||
memberClient, _ := coderdtest.CreateAnotherUser(t, adminClient, first.OrganizationID)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a template with parameters
|
||||
version := coderdtest.CreateTemplateVersion(t, adminClient, first.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: []*proto.Response{{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{
|
||||
{
|
||||
Name: "param1",
|
||||
Type: "string",
|
||||
DefaultValue: "default1",
|
||||
},
|
||||
{
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Type: "string",
|
||||
DefaultValue: "default2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, adminClient, first.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, adminClient, version.ID)
|
||||
|
||||
// Create two workspaces with different parameters
|
||||
workspace1 := coderdtest.CreateWorkspace(t, memberClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1a"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2a"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, memberClient, workspace1.LatestBuild.ID)
|
||||
|
||||
workspace2 := coderdtest.CreateWorkspace(t, memberClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1b"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2b"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, memberClient, workspace2.LatestBuild.ID)
|
||||
|
||||
workspace3 := coderdtest.CreateWorkspace(t, adminClient, template.ID, func(request *codersdk.CreateWorkspaceRequest) {
|
||||
request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{Name: "param1", Value: "value1c"},
|
||||
{Name: codersdk.AITaskPromptParameterName, Value: "value2c"},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, adminClient, workspace3.LatestBuild.ID)
|
||||
allBuildIDs := []uuid.UUID{workspace1.LatestBuild.ID, workspace2.LatestBuild.ID, workspace3.LatestBuild.ID}
|
||||
|
||||
experimentalMemberClient := codersdk.NewExperimentalClient(memberClient)
|
||||
// Test parameters endpoint as member
|
||||
prompts, err := experimentalMemberClient.AITaskPrompts(ctx, allBuildIDs)
|
||||
require.NoError(t, err)
|
||||
// we expect 2 prompts because the member client does not have access to workspace3
|
||||
// since it was created by the admin client
|
||||
require.Len(t, prompts.Prompts, 2)
|
||||
|
||||
// Check workspace1 parameters
|
||||
build1Prompt := prompts.Prompts[workspace1.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2a", build1Prompt)
|
||||
|
||||
// Check workspace2 parameters
|
||||
build2Prompt := prompts.Prompts[workspace2.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2b", build2Prompt)
|
||||
|
||||
experimentalAdminClient := codersdk.NewExperimentalClient(adminClient)
|
||||
// Test parameters endpoint as admin
|
||||
// we expect 3 prompts because the admin client has access to all workspaces
|
||||
prompts, err = experimentalAdminClient.AITaskPrompts(ctx, allBuildIDs)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, prompts.Prompts, 3)
|
||||
|
||||
// Check workspace3 parameters
|
||||
build3Prompt := prompts.Prompts[workspace3.LatestBuild.ID.String()]
|
||||
require.Equal(t, "value2c", build3Prompt)
|
||||
})
|
||||
|
||||
t.Run("NonExistentBuildIDs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Test with non-existent build IDs
|
||||
nonExistentID := uuid.New()
|
||||
experimentalClient := codersdk.NewExperimentalClient(client)
|
||||
prompts, err := experimentalClient.AITaskPrompts(ctx, []uuid.UUID{nonExistentID})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, prompts.Prompts)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -64,6 +188,7 @@ func TestTasks(t *testing.T) {
|
||||
{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
},
|
||||
},
|
||||
@@ -157,13 +282,12 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
ctx = testutil.Context(t, testutil.WaitLong)
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
anotherUser, _ = coderdtest.CreateAnotherUser(t, client, user.OrganizationID)
|
||||
template = createAITemplate(t, client, user)
|
||||
wantPrompt = "review my code"
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
ctx = testutil.Context(t, testutil.WaitLong)
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
template = createAITemplate(t, client, user)
|
||||
wantPrompt = "review my code"
|
||||
exp = codersdk.NewExperimentalClient(client)
|
||||
)
|
||||
|
||||
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
@@ -213,24 +337,6 @@ func TestTasks(t *testing.T) {
|
||||
assert.Equal(t, taskAppID, updated.WorkspaceAppID.UUID, "workspace app id should match")
|
||||
assert.NotEmpty(t, updated.WorkspaceStatus, "task status should not be empty")
|
||||
|
||||
// Fetch the task by name and verify the same result
|
||||
byName, err := exp.TaskByOwnerAndName(ctx, codersdk.Me, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, byName, updated)
|
||||
|
||||
// Another member user should not be able to fetch the task
|
||||
otherClient := codersdk.NewExperimentalClient(anotherUser)
|
||||
_, err = otherClient.TaskByID(ctx, task.ID)
|
||||
require.Error(t, err, "fetching task should fail by ID for another member user")
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
// Also test by name
|
||||
_, err = otherClient.TaskByOwnerAndName(ctx, task.OwnerName, task.Name)
|
||||
require.Error(t, err, "fetching task should fail by name for another member user")
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
|
||||
// Stop the workspace
|
||||
coderdtest.MustTransitionWorkspace(t, client, task.WorkspaceID.UUID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
|
||||
|
||||
@@ -674,7 +780,7 @@ func TestTasks(t *testing.T) {
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Fetch the task by ID via experimental API and verify fields.
|
||||
task, err = exp.TaskByIdentifier(ctx, task.ID.String())
|
||||
task, err = exp.TaskByID(ctx, task.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, task.WorkspaceBuildNumber)
|
||||
require.True(t, task.WorkspaceAgentID.Valid)
|
||||
@@ -751,51 +857,6 @@ func TestTasksCreate(t *testing.T) {
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
expClient := codersdk.NewExperimentalClient(client)
|
||||
|
||||
task, err := expClient.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: taskPrompt,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid)
|
||||
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
assert.NotEmpty(t, task.Name)
|
||||
assert.Equal(t, template.ID, task.TemplateID)
|
||||
|
||||
parameters, err := client.WorkspaceBuildParameters(ctx, ws.LatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, parameters, 0)
|
||||
})
|
||||
|
||||
t.Run("OK AIPromptBackCompat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
taskPrompt = "Some task prompt"
|
||||
)
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Given: A template with an "AI Prompt" parameter
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
@@ -875,6 +936,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -990,6 +1052,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1049,6 +1112,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1085,6 +1149,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1137,6 +1202,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1149,6 +1215,7 @@ func TestTasksCreate(t *testing.T) {
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionPlan: []*proto.Response{
|
||||
{Type: &proto.Response_Plan{Plan: &proto.PlanComplete{
|
||||
Parameters: []*proto.RichParameter{{Name: codersdk.AITaskPromptParameterName, Type: "string"}},
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
@@ -1185,7 +1252,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent bool
|
||||
notificationTemplate uuid.UUID
|
||||
taskPrompt string
|
||||
agentLifecycle database.WorkspaceAgentLifecycleState
|
||||
}{
|
||||
// Should not send a notification when the agent app is not an AI task.
|
||||
{
|
||||
@@ -1233,7 +1299,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskIdle,
|
||||
taskPrompt: "InitialTemplateTaskIdle",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskWorking when the AI task transitions to 'Working' from 'Idle'.
|
||||
{
|
||||
@@ -1247,7 +1312,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskWorking,
|
||||
taskPrompt: "TemplateTaskWorkingFromIdle",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskIdle when the AI task transitions to 'Idle'.
|
||||
{
|
||||
@@ -1258,7 +1322,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskIdle,
|
||||
taskPrompt: "TemplateTaskIdle",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Long task prompts should be truncated to 160 characters.
|
||||
{
|
||||
@@ -1269,7 +1332,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskIdle,
|
||||
taskPrompt: "This is a very long task prompt that should be truncated to 160 characters. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskCompleted when the AI task transitions to 'Complete'.
|
||||
{
|
||||
@@ -1280,7 +1342,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskCompleted,
|
||||
taskPrompt: "TemplateTaskCompleted",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskFailed when the AI task transitions to 'Failure'.
|
||||
{
|
||||
@@ -1291,7 +1352,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskFailed,
|
||||
taskPrompt: "TemplateTaskFailed",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskCompleted when the AI task transitions from 'Idle' to 'Complete'.
|
||||
{
|
||||
@@ -1302,7 +1362,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskCompleted,
|
||||
taskPrompt: "TemplateTaskCompletedFromIdle",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should send TemplateTaskFailed when the AI task transitions from 'Idle' to 'Failure'.
|
||||
{
|
||||
@@ -1313,7 +1372,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskFailed,
|
||||
taskPrompt: "TemplateTaskFailedFromIdle",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
// Should NOT send notification when transitioning from 'Complete' to 'Complete' (no change).
|
||||
{
|
||||
@@ -1333,37 +1391,6 @@ func TestTasksNotification(t *testing.T) {
|
||||
isNotificationSent: false,
|
||||
taskPrompt: "NoNotificationFailureToFailure",
|
||||
},
|
||||
// Should NOT send notification when agent is in 'starting' lifecycle state (agent startup).
|
||||
{
|
||||
name: "AgentStarting_NoNotification",
|
||||
latestAppStatuses: nil,
|
||||
newAppStatus: codersdk.WorkspaceAppStatusStateIdle,
|
||||
isAITask: true,
|
||||
isNotificationSent: false,
|
||||
taskPrompt: "AgentStarting_NoNotification",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateStarting,
|
||||
},
|
||||
// Should NOT send notification when agent is in 'created' lifecycle state (agent not started).
|
||||
{
|
||||
name: "AgentCreated_NoNotification",
|
||||
latestAppStatuses: []codersdk.WorkspaceAppStatusState{codersdk.WorkspaceAppStatusStateWorking},
|
||||
newAppStatus: codersdk.WorkspaceAppStatusStateIdle,
|
||||
isAITask: true,
|
||||
isNotificationSent: false,
|
||||
taskPrompt: "AgentCreated_NoNotification",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateCreated,
|
||||
},
|
||||
// Should send notification when agent is in 'ready' lifecycle state (agent fully started).
|
||||
{
|
||||
name: "AgentReady_SendNotification",
|
||||
latestAppStatuses: []codersdk.WorkspaceAppStatusState{codersdk.WorkspaceAppStatusStateWorking},
|
||||
newAppStatus: codersdk.WorkspaceAppStatusStateIdle,
|
||||
isAITask: true,
|
||||
isNotificationSent: true,
|
||||
notificationTemplate: notifications.TemplateTaskIdle,
|
||||
taskPrompt: "AgentReady_SendNotification",
|
||||
agentLifecycle: database.WorkspaceAgentLifecycleStateReady,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -1382,57 +1409,31 @@ func TestTasksNotification(t *testing.T) {
|
||||
// Given: a workspace build with an agent containing an App
|
||||
workspaceAgentAppID := uuid.New()
|
||||
workspaceBuildID := uuid.New()
|
||||
workspaceBuilder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
workspaceBuildSeed := database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
}
|
||||
if tc.isAITask {
|
||||
workspaceBuildSeed = database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
// AI Task configuration
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
AITaskSidebarAppID: uuid.NullUUID{UUID: workspaceAgentAppID, Valid: true},
|
||||
}
|
||||
}
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: ownerUser.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).Seed(database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
})
|
||||
if tc.isAITask {
|
||||
workspaceBuilder = workspaceBuilder.
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: tc.taskPrompt,
|
||||
}, &proto.App{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
})
|
||||
} else {
|
||||
workspaceBuilder = workspaceBuilder.
|
||||
WithAgent(func(agent []*proto.Agent) []*proto.Agent {
|
||||
agent[0].Apps = []*proto.App{{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
}}
|
||||
return agent
|
||||
})
|
||||
}
|
||||
workspaceBuild := workspaceBuilder.Do()
|
||||
|
||||
// Given: set the agent lifecycle state if specified
|
||||
if tc.agentLifecycle != "" {
|
||||
workspace := coderdtest.MustWorkspace(t, client, workspaceBuild.Workspace.ID)
|
||||
agentID := workspace.LatestBuild.Resources[0].Agents[0].ID
|
||||
|
||||
var (
|
||||
startedAt sql.NullTime
|
||||
readyAt sql.NullTime
|
||||
)
|
||||
if tc.agentLifecycle == database.WorkspaceAgentLifecycleStateReady {
|
||||
startedAt = sql.NullTime{Time: dbtime.Now(), Valid: true}
|
||||
readyAt = sql.NullTime{Time: dbtime.Now(), Valid: true}
|
||||
} else if tc.agentLifecycle == database.WorkspaceAgentLifecycleStateStarting {
|
||||
startedAt = sql.NullTime{Time: dbtime.Now(), Valid: true}
|
||||
}
|
||||
|
||||
// nolint:gocritic // This is a system restricted operation for test setup.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(dbauthz.AsSystemRestricted(ctx), database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: agentID,
|
||||
LifecycleState: tc.agentLifecycle,
|
||||
StartedAt: startedAt,
|
||||
ReadyAt: readyAt,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}).Seed(workspaceBuildSeed).Params(database.WorkspaceBuildParameter{
|
||||
WorkspaceBuildID: workspaceBuildID,
|
||||
Name: codersdk.AITaskPromptParameterName,
|
||||
Value: tc.taskPrompt,
|
||||
}).WithAgent(func(agent []*proto.Agent) []*proto.Agent {
|
||||
agent[0].Apps = []*proto.App{{
|
||||
Id: workspaceAgentAppID.String(),
|
||||
Slug: "ccw",
|
||||
}}
|
||||
return agent
|
||||
}).Do()
|
||||
|
||||
// Given: the workspace agent app has previous statuses
|
||||
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(workspaceBuild.AgentToken))
|
||||
@@ -1473,7 +1474,13 @@ func TestTasksNotification(t *testing.T) {
|
||||
require.Len(t, sent, 1)
|
||||
require.Equal(t, memberUser.ID, sent[0].UserID)
|
||||
require.Len(t, sent[0].Labels, 2)
|
||||
require.Equal(t, workspaceBuild.Task.Name, sent[0].Labels["task"])
|
||||
// NOTE: len(string) is the number of bytes in the string, not the number of runes.
|
||||
require.LessOrEqual(t, utf8.RuneCountInString(sent[0].Labels["task"]), 160)
|
||||
if len(tc.taskPrompt) > 160 {
|
||||
require.Contains(t, tc.taskPrompt, strings.TrimSuffix(sent[0].Labels["task"], "…"))
|
||||
} else {
|
||||
require.Equal(t, tc.taskPrompt, sent[0].Labels["task"])
|
||||
}
|
||||
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
|
||||
} else {
|
||||
// Then: No notification is sent
|
||||
|
||||
Generated
+5
-10
@@ -11700,9 +11700,6 @@ const docTemplate = `{
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"inject_coder_mcp_tools": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"openai": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeOpenAIConfig"
|
||||
}
|
||||
@@ -11711,9 +11708,6 @@ const docTemplate = `{
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"api_key_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -15441,9 +15435,6 @@ const docTemplate = `{
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -20535,6 +20526,11 @@ const docTemplate = `{
|
||||
"codersdk.WorkspaceBuild": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with ` + "`" + `Task.WorkspaceAppID` + "`" + `",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"build_number": {
|
||||
"type": "integer"
|
||||
},
|
||||
@@ -20550,7 +20546,6 @@ const docTemplate = `{
|
||||
"format": "date-time"
|
||||
},
|
||||
"has_ai_task": {
|
||||
"description": "Deprecated: This field has been deprecated in favor of Task WorkspaceID.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"has_external_agent": {
|
||||
|
||||
Generated
+5
-10
@@ -10396,9 +10396,6 @@
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"inject_coder_mcp_tools": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"openai": {
|
||||
"$ref": "#/definitions/codersdk.AIBridgeOpenAIConfig"
|
||||
}
|
||||
@@ -10407,9 +10404,6 @@
|
||||
"codersdk.AIBridgeInterception": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"api_key_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -13995,9 +13989,6 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"revocation_endpoint": {
|
||||
"type": "string"
|
||||
},
|
||||
"scopes_supported": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -18869,6 +18860,11 @@
|
||||
"codersdk.WorkspaceBuild": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ai_task_sidebar_app_id": {
|
||||
"description": "Deprecated: This field has been replaced with `Task.WorkspaceAppID`",
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"build_number": {
|
||||
"type": "integer"
|
||||
},
|
||||
@@ -18884,7 +18880,6 @@
|
||||
"format": "date-time"
|
||||
},
|
||||
"has_ai_task": {
|
||||
"description": "Deprecated: This field has been deprecated in favor of Task WorkspaceID.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"has_external_agent": {
|
||||
|
||||
+4
-1
@@ -1021,7 +1021,10 @@ func New(options *Options) *API {
|
||||
apiRateLimiter,
|
||||
httpmw.ReportCLITelemetry(api.Logger, options.Telemetry),
|
||||
)
|
||||
|
||||
r.Route("/aitasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Get("/prompts", api.aiTasksPrompts)
|
||||
})
|
||||
r.Route("/tasks", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
|
||||
|
||||
@@ -1604,7 +1604,7 @@ func (nopcloser) Close() error { return nil }
|
||||
// SDKError coerces err into an SDK error.
|
||||
func SDKError(t testing.TB, err error) *codersdk.Error {
|
||||
var cerr *codersdk.Error
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %s", err)
|
||||
require.True(t, errors.As(err, &cerr), "should be SDK error, got %w", err)
|
||||
return cerr
|
||||
}
|
||||
|
||||
|
||||
@@ -6,14 +6,15 @@ type CheckConstraint string
|
||||
|
||||
// CheckConstraint enums.
|
||||
const (
|
||||
CheckAPIKeysAllowListNotEmpty CheckConstraint = "api_keys_allow_list_not_empty" // api_keys
|
||||
CheckOneTimePasscodeSet CheckConstraint = "one_time_passcode_set" // users
|
||||
CheckUsersUsernameMinLength CheckConstraint = "users_username_min_length" // users
|
||||
CheckMaxProvisionerLogsLength CheckConstraint = "max_provisioner_logs_length" // provisioner_jobs
|
||||
CheckMaxLogsLength CheckConstraint = "max_logs_length" // workspace_agents
|
||||
CheckSubsystemsNotNone CheckConstraint = "subsystems_not_none" // workspace_agents
|
||||
CheckWorkspaceBuildsDeadlineBelowMaxDeadline CheckConstraint = "workspace_builds_deadline_below_max_deadline" // workspace_builds
|
||||
CheckTelemetryLockEventTypeConstraint CheckConstraint = "telemetry_lock_event_type_constraint" // telemetry_locks
|
||||
CheckValidationMonotonicOrder CheckConstraint = "validation_monotonic_order" // template_version_parameters
|
||||
CheckUsageEventTypeCheck CheckConstraint = "usage_event_type_check" // usage_events
|
||||
CheckAPIKeysAllowListNotEmpty CheckConstraint = "api_keys_allow_list_not_empty" // api_keys
|
||||
CheckOneTimePasscodeSet CheckConstraint = "one_time_passcode_set" // users
|
||||
CheckUsersUsernameMinLength CheckConstraint = "users_username_min_length" // users
|
||||
CheckMaxProvisionerLogsLength CheckConstraint = "max_provisioner_logs_length" // provisioner_jobs
|
||||
CheckMaxLogsLength CheckConstraint = "max_logs_length" // workspace_agents
|
||||
CheckSubsystemsNotNone CheckConstraint = "subsystems_not_none" // workspace_agents
|
||||
CheckWorkspaceBuildsAiTaskSidebarAppIDRequired CheckConstraint = "workspace_builds_ai_task_sidebar_app_id_required" // workspace_builds
|
||||
CheckWorkspaceBuildsDeadlineBelowMaxDeadline CheckConstraint = "workspace_builds_deadline_below_max_deadline" // workspace_builds
|
||||
CheckTelemetryLockEventTypeConstraint CheckConstraint = "telemetry_lock_event_type_constraint" // telemetry_locks
|
||||
CheckValidationMonotonicOrder CheckConstraint = "validation_monotonic_order" // template_version_parameters
|
||||
CheckUsageEventTypeCheck CheckConstraint = "usage_event_type_check" // usage_events
|
||||
)
|
||||
|
||||
@@ -974,9 +974,6 @@ func AIBridgeInterception(interception database.AIBridgeInterception, initiator
|
||||
UserPrompts: sdkUserPrompts,
|
||||
ToolUsages: sdkToolUsages,
|
||||
}
|
||||
if interception.APIKeyID.Valid {
|
||||
intc.APIKeyID = &interception.APIKeyID.String
|
||||
}
|
||||
if interception.EndedAt.Valid {
|
||||
intc.EndedAt = &interception.EndedAt.Time
|
||||
}
|
||||
|
||||
@@ -2989,10 +2989,6 @@ func (q *querier) GetTaskByID(ctx context.Context, id uuid.UUID) (database.Task,
|
||||
return fetch(q.log, q.auth, q.db.GetTaskByID)(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetTaskByOwnerIDAndName(ctx context.Context, arg database.GetTaskByOwnerIDAndNameParams) (database.Task, error) {
|
||||
return fetch(q.log, q.auth, q.db.GetTaskByOwnerIDAndName)(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (database.Task, error) {
|
||||
return fetch(q.log, q.auth, q.db.GetTaskByWorkspaceID)(ctx, workspaceID)
|
||||
}
|
||||
|
||||
@@ -2161,7 +2161,7 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
})
|
||||
res := testutil.Fake(s.T(), faker, database.WorkspaceResource{JobID: b.JobID})
|
||||
agt := testutil.Fake(s.T(), faker, database.WorkspaceAgent{ResourceID: res.ID})
|
||||
_ = testutil.Fake(s.T(), faker, database.WorkspaceApp{AgentID: agt.ID})
|
||||
app := testutil.Fake(s.T(), faker, database.WorkspaceApp{AgentID: agt.ID})
|
||||
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), w.ID).Return(w, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceBuildByID(gomock.Any(), b.ID).Return(b, nil).AnyTimes()
|
||||
@@ -2170,6 +2170,7 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
ID: b.ID,
|
||||
HasAITask: sql.NullBool{Bool: true, Valid: true},
|
||||
HasExternalAgent: sql.NullBool{Bool: true, Valid: true},
|
||||
SidebarAppID: uuid.NullUUID{UUID: app.ID, Valid: true},
|
||||
UpdatedAt: b.UpdatedAt,
|
||||
}).Asserts(w, policy.ActionUpdate)
|
||||
}))
|
||||
@@ -2374,17 +2375,6 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
check.Args(task.ID).Asserts(task, policy.ActionRead).Returns(task)
|
||||
}))
|
||||
s.Run("GetTaskByOwnerIDAndName", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
dbm.EXPECT().GetTaskByOwnerIDAndName(gomock.Any(), database.GetTaskByOwnerIDAndNameParams{
|
||||
OwnerID: task.OwnerID,
|
||||
Name: task.Name,
|
||||
}).Return(task, nil).AnyTimes()
|
||||
check.Args(database.GetTaskByOwnerIDAndNameParams{
|
||||
OwnerID: task.OwnerID,
|
||||
Name: task.Name,
|
||||
}).Asserts(task, policy.ActionRead).Returns(task)
|
||||
}))
|
||||
s.Run("DeleteTask", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
arg := database.DeleteTaskParams{
|
||||
|
||||
@@ -189,6 +189,7 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
Bool: true,
|
||||
Valid: true,
|
||||
}
|
||||
b.seed.AITaskSidebarAppID = uuid.NullUUID{UUID: b.taskAppID, Valid: true}
|
||||
}
|
||||
|
||||
resp := WorkspaceResponse{
|
||||
|
||||
@@ -451,6 +451,7 @@ func WorkspaceBuild(t testing.TB, db database.Store, orig database.WorkspaceBuil
|
||||
buildID := takeFirst(orig.ID, uuid.New())
|
||||
jobID := takeFirst(orig.JobID, uuid.New())
|
||||
hasAITask := takeFirst(orig.HasAITask, sql.NullBool{})
|
||||
sidebarAppID := takeFirst(orig.AITaskSidebarAppID, uuid.NullUUID{})
|
||||
hasExternalAgent := takeFirst(orig.HasExternalAgent, sql.NullBool{})
|
||||
var build database.WorkspaceBuild
|
||||
err := db.InTx(func(db database.Store) error {
|
||||
@@ -490,6 +491,7 @@ func WorkspaceBuild(t testing.TB, db database.Store, orig database.WorkspaceBuil
|
||||
ID: buildID,
|
||||
HasAITask: hasAITask,
|
||||
HasExternalAgent: hasExternalAgent,
|
||||
SidebarAppID: sidebarAppID,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
}))
|
||||
}
|
||||
@@ -1496,7 +1498,6 @@ func ClaimPrebuild(
|
||||
func AIBridgeInterception(t testing.TB, db database.Store, seed database.InsertAIBridgeInterceptionParams, endedAt *time.Time) database.AIBridgeInterception {
|
||||
interception, err := db.InsertAIBridgeInterception(genCtx, database.InsertAIBridgeInterceptionParams{
|
||||
ID: takeFirst(seed.ID, uuid.New()),
|
||||
APIKeyID: seed.APIKeyID,
|
||||
InitiatorID: takeFirst(seed.InitiatorID, uuid.New()),
|
||||
Provider: takeFirst(seed.Provider, "provider"),
|
||||
Model: takeFirst(seed.Model, "model"),
|
||||
|
||||
@@ -1530,13 +1530,6 @@ func (m queryMetricsStore) GetTaskByID(ctx context.Context, id uuid.UUID) (datab
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTaskByOwnerIDAndName(ctx context.Context, arg database.GetTaskByOwnerIDAndNameParams) (database.Task, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTaskByOwnerIDAndName(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetTaskByOwnerIDAndName").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (database.Task, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTaskByWorkspaceID(ctx, workspaceID)
|
||||
|
||||
@@ -3237,21 +3237,6 @@ func (mr *MockStoreMockRecorder) GetTaskByID(ctx, id any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskByID", reflect.TypeOf((*MockStore)(nil).GetTaskByID), ctx, id)
|
||||
}
|
||||
|
||||
// GetTaskByOwnerIDAndName mocks base method.
|
||||
func (m *MockStore) GetTaskByOwnerIDAndName(ctx context.Context, arg database.GetTaskByOwnerIDAndNameParams) (database.Task, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTaskByOwnerIDAndName", ctx, arg)
|
||||
ret0, _ := ret[0].(database.Task)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTaskByOwnerIDAndName indicates an expected call of GetTaskByOwnerIDAndName.
|
||||
func (mr *MockStoreMockRecorder) GetTaskByOwnerIDAndName(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskByOwnerIDAndName", reflect.TypeOf((*MockStore)(nil).GetTaskByOwnerIDAndName), ctx, arg)
|
||||
}
|
||||
|
||||
// GetTaskByWorkspaceID mocks base method.
|
||||
func (m *MockStore) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (database.Task, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
Generated
+7
-2
@@ -1056,8 +1056,7 @@ CREATE TABLE aibridge_interceptions (
|
||||
model text NOT NULL,
|
||||
started_at timestamp with time zone NOT NULL,
|
||||
metadata jsonb,
|
||||
ended_at timestamp with time zone,
|
||||
api_key_id text
|
||||
ended_at timestamp with time zone
|
||||
);
|
||||
|
||||
COMMENT ON TABLE aibridge_interceptions IS 'Audit log of requests intercepted by AI Bridge';
|
||||
@@ -1949,7 +1948,9 @@ CREATE TABLE workspace_builds (
|
||||
max_deadline timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
|
||||
template_version_preset_id uuid,
|
||||
has_ai_task boolean,
|
||||
ai_task_sidebar_app_id uuid,
|
||||
has_external_agent boolean,
|
||||
CONSTRAINT workspace_builds_ai_task_sidebar_app_id_required CHECK (((((has_ai_task IS NULL) OR (has_ai_task = false)) AND (ai_task_sidebar_app_id IS NULL)) OR ((has_ai_task = true) AND (ai_task_sidebar_app_id IS NOT NULL)))),
|
||||
CONSTRAINT workspace_builds_deadline_below_max_deadline CHECK ((((deadline <> '0001-01-01 00:00:00+00'::timestamp with time zone) AND (deadline <= max_deadline)) OR (max_deadline = '0001-01-01 00:00:00+00'::timestamp with time zone)))
|
||||
);
|
||||
|
||||
@@ -2702,6 +2703,7 @@ CREATE VIEW workspace_build_with_user AS
|
||||
workspace_builds.max_deadline,
|
||||
workspace_builds.template_version_preset_id,
|
||||
workspace_builds.has_ai_task,
|
||||
workspace_builds.ai_task_sidebar_app_id,
|
||||
workspace_builds.has_external_agent,
|
||||
COALESCE(visible_users.avatar_url, ''::text) AS initiator_by_avatar_url,
|
||||
COALESCE(visible_users.username, ''::text) AS initiator_by_username,
|
||||
@@ -3802,6 +3804,9 @@ ALTER TABLE ONLY workspace_apps
|
||||
ALTER TABLE ONLY workspace_build_parameters
|
||||
ADD CONSTRAINT workspace_build_parameters_workspace_build_id_fkey FOREIGN KEY (workspace_build_id) REFERENCES workspace_builds(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_builds
|
||||
ADD CONSTRAINT workspace_builds_ai_task_sidebar_app_id_fkey FOREIGN KEY (ai_task_sidebar_app_id) REFERENCES workspace_apps(id);
|
||||
|
||||
ALTER TABLE ONLY workspace_builds
|
||||
ADD CONSTRAINT workspace_builds_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
@@ -94,6 +94,7 @@ const (
|
||||
ForeignKeyWorkspaceAppStatusesWorkspaceID ForeignKeyConstraint = "workspace_app_statuses_workspace_id_fkey" // ALTER TABLE ONLY workspace_app_statuses ADD CONSTRAINT workspace_app_statuses_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id);
|
||||
ForeignKeyWorkspaceAppsAgentID ForeignKeyConstraint = "workspace_apps_agent_id_fkey" // ALTER TABLE ONLY workspace_apps ADD CONSTRAINT workspace_apps_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceBuildParametersWorkspaceBuildID ForeignKeyConstraint = "workspace_build_parameters_workspace_build_id_fkey" // ALTER TABLE ONLY workspace_build_parameters ADD CONSTRAINT workspace_build_parameters_workspace_build_id_fkey FOREIGN KEY (workspace_build_id) REFERENCES workspace_builds(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceBuildsAiTaskSidebarAppID ForeignKeyConstraint = "workspace_builds_ai_task_sidebar_app_id_fkey" // ALTER TABLE ONLY workspace_builds ADD CONSTRAINT workspace_builds_ai_task_sidebar_app_id_fkey FOREIGN KEY (ai_task_sidebar_app_id) REFERENCES workspace_apps(id);
|
||||
ForeignKeyWorkspaceBuildsJobID ForeignKeyConstraint = "workspace_builds_job_id_fkey" // ALTER TABLE ONLY workspace_builds ADD CONSTRAINT workspace_builds_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceBuildsTemplateVersionID ForeignKeyConstraint = "workspace_builds_template_version_id_fkey" // ALTER TABLE ONLY workspace_builds ADD CONSTRAINT workspace_builds_template_version_id_fkey FOREIGN KEY (template_version_id) REFERENCES template_versions(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceBuildsTemplateVersionPresetID ForeignKeyConstraint = "workspace_builds_template_version_preset_id_fkey" // ALTER TABLE ONLY workspace_builds ADD CONSTRAINT workspace_builds_template_version_preset_id_fkey FOREIGN KEY (template_version_preset_id) REFERENCES template_version_presets(id) ON DELETE SET NULL;
|
||||
|
||||
-4
@@ -1,4 +0,0 @@
|
||||
-- WARNING: Restoring this constraint after running a newer version of coderd
|
||||
-- and using tasks is bound to break this constraint.
|
||||
ALTER TABLE workspace_builds
|
||||
ADD CONSTRAINT workspace_builds_ai_task_sidebar_app_id_required CHECK (((((has_ai_task IS NULL) OR (has_ai_task = false)) AND (ai_task_sidebar_app_id IS NULL)) OR ((has_ai_task = true) AND (ai_task_sidebar_app_id IS NOT NULL))));
|
||||
-4
@@ -1,4 +0,0 @@
|
||||
-- We no longer need to enforce this constraint as tasks have their own data
|
||||
-- model.
|
||||
ALTER TABLE workspace_builds
|
||||
DROP CONSTRAINT workspace_builds_ai_task_sidebar_app_id_required;
|
||||
-45
@@ -1,45 +0,0 @@
|
||||
ALTER TABLE workspace_builds ADD COLUMN ai_task_sidebar_app_id UUID;
|
||||
ALTER TABLE workspace_builds ADD CONSTRAINT workspace_builds_ai_task_sidebar_app_id_fkey FOREIGN KEY (ai_task_sidebar_app_id) REFERENCES workspace_apps(id);
|
||||
|
||||
DROP VIEW workspace_build_with_user;
|
||||
-- Restore view.
|
||||
CREATE VIEW workspace_build_with_user AS
|
||||
SELECT
|
||||
workspace_builds.id,
|
||||
workspace_builds.created_at,
|
||||
workspace_builds.updated_at,
|
||||
workspace_builds.workspace_id,
|
||||
workspace_builds.template_version_id,
|
||||
workspace_builds.build_number,
|
||||
workspace_builds.transition,
|
||||
workspace_builds.initiator_id,
|
||||
workspace_builds.provisioner_state,
|
||||
workspace_builds.job_id,
|
||||
workspace_builds.deadline,
|
||||
workspace_builds.reason,
|
||||
workspace_builds.daily_cost,
|
||||
workspace_builds.max_deadline,
|
||||
workspace_builds.template_version_preset_id,
|
||||
workspace_builds.has_ai_task,
|
||||
workspace_builds.ai_task_sidebar_app_id,
|
||||
workspace_builds.has_external_agent,
|
||||
COALESCE(
|
||||
visible_users.avatar_url,
|
||||
'' :: text
|
||||
) AS initiator_by_avatar_url,
|
||||
COALESCE(
|
||||
visible_users.username,
|
||||
'' :: text
|
||||
) AS initiator_by_username,
|
||||
COALESCE(visible_users.name, '' :: text) AS initiator_by_name
|
||||
FROM
|
||||
(
|
||||
workspace_builds
|
||||
LEFT JOIN visible_users ON (
|
||||
(
|
||||
workspace_builds.initiator_id = visible_users.id
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
COMMENT ON VIEW workspace_build_with_user IS 'Joins in the username + avatar url of the initiated by user.';
|
||||
-43
@@ -1,43 +0,0 @@
|
||||
-- We're dropping the ai_task_sidebar_app_id column.
|
||||
DROP VIEW workspace_build_with_user;
|
||||
CREATE VIEW workspace_build_with_user AS
|
||||
SELECT
|
||||
workspace_builds.id,
|
||||
workspace_builds.created_at,
|
||||
workspace_builds.updated_at,
|
||||
workspace_builds.workspace_id,
|
||||
workspace_builds.template_version_id,
|
||||
workspace_builds.build_number,
|
||||
workspace_builds.transition,
|
||||
workspace_builds.initiator_id,
|
||||
workspace_builds.provisioner_state,
|
||||
workspace_builds.job_id,
|
||||
workspace_builds.deadline,
|
||||
workspace_builds.reason,
|
||||
workspace_builds.daily_cost,
|
||||
workspace_builds.max_deadline,
|
||||
workspace_builds.template_version_preset_id,
|
||||
workspace_builds.has_ai_task,
|
||||
workspace_builds.has_external_agent,
|
||||
COALESCE(
|
||||
visible_users.avatar_url,
|
||||
'' :: text
|
||||
) AS initiator_by_avatar_url,
|
||||
COALESCE(
|
||||
visible_users.username,
|
||||
'' :: text
|
||||
) AS initiator_by_username,
|
||||
COALESCE(visible_users.name, '' :: text) AS initiator_by_name
|
||||
FROM
|
||||
(
|
||||
workspace_builds
|
||||
LEFT JOIN visible_users ON (
|
||||
(
|
||||
workspace_builds.initiator_id = visible_users.id
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
COMMENT ON VIEW workspace_build_with_user IS 'Joins in the username + avatar url of the initiated by user.';
|
||||
|
||||
ALTER TABLE workspace_builds DROP COLUMN ai_task_sidebar_app_id;
|
||||
@@ -1 +0,0 @@
|
||||
ALTER TABLE aibridge_interceptions DROP COLUMN api_key_id;
|
||||
@@ -1,2 +0,0 @@
|
||||
-- column is nullable to not break interceptions recorded before this column was added
|
||||
ALTER TABLE aibridge_interceptions ADD COLUMN api_key_id text;
|
||||
@@ -329,6 +329,7 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa
|
||||
&i.LatestBuildError,
|
||||
&i.LatestBuildTransition,
|
||||
&i.LatestBuildStatus,
|
||||
&i.LatestBuildHasAITask,
|
||||
&i.LatestBuildHasExternalAgent,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
@@ -805,7 +806,6 @@ func (q *sqlQuerier) ListAuthorizedAIBridgeInterceptions(ctx context.Context, ar
|
||||
&i.AIBridgeInterception.StartedAt,
|
||||
&i.AIBridgeInterception.Metadata,
|
||||
&i.AIBridgeInterception.EndedAt,
|
||||
&i.AIBridgeInterception.APIKeyID,
|
||||
&i.VisibleUser.ID,
|
||||
&i.VisibleUser.Username,
|
||||
&i.VisibleUser.Name,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// sqlc v1.27.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -3614,7 +3614,6 @@ type AIBridgeInterception struct {
|
||||
StartedAt time.Time `db:"started_at" json:"started_at"`
|
||||
Metadata pqtype.NullRawMessage `db:"metadata" json:"metadata"`
|
||||
EndedAt sql.NullTime `db:"ended_at" json:"ended_at"`
|
||||
APIKeyID sql.NullString `db:"api_key_id" json:"api_key_id"`
|
||||
}
|
||||
|
||||
// Audit log of tokens used by intercepted requests in AI Bridge
|
||||
@@ -4940,6 +4939,7 @@ type WorkspaceBuild struct {
|
||||
MaxDeadline time.Time `db:"max_deadline" json:"max_deadline"`
|
||||
TemplateVersionPresetID uuid.NullUUID `db:"template_version_preset_id" json:"template_version_preset_id"`
|
||||
HasAITask sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
|
||||
AITaskSidebarAppID uuid.NullUUID `db:"ai_task_sidebar_app_id" json:"ai_task_sidebar_app_id"`
|
||||
HasExternalAgent sql.NullBool `db:"has_external_agent" json:"has_external_agent"`
|
||||
InitiatorByAvatarUrl string `db:"initiator_by_avatar_url" json:"initiator_by_avatar_url"`
|
||||
InitiatorByUsername string `db:"initiator_by_username" json:"initiator_by_username"`
|
||||
@@ -4971,6 +4971,7 @@ type WorkspaceBuildTable struct {
|
||||
MaxDeadline time.Time `db:"max_deadline" json:"max_deadline"`
|
||||
TemplateVersionPresetID uuid.NullUUID `db:"template_version_preset_id" json:"template_version_preset_id"`
|
||||
HasAITask sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
|
||||
AITaskSidebarAppID uuid.NullUUID `db:"ai_task_sidebar_app_id" json:"ai_task_sidebar_app_id"`
|
||||
HasExternalAgent sql.NullBool `db:"has_external_agent" json:"has_external_agent"`
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// sqlc v1.27.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -343,7 +343,6 @@ type sqlcQuerier interface {
|
||||
GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerBindingsRow, error)
|
||||
GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerIDsRow, error)
|
||||
GetTaskByID(ctx context.Context, id uuid.UUID) (Task, error)
|
||||
GetTaskByOwnerIDAndName(ctx context.Context, arg GetTaskByOwnerIDAndNameParams) (Task, error)
|
||||
GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (Task, error)
|
||||
GetTelemetryItem(ctx context.Context, key string) (TelemetryItem, error)
|
||||
GetTelemetryItems(ctx context.Context) ([]TelemetryItem, error)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// sqlc v1.27.0
|
||||
|
||||
package database
|
||||
|
||||
@@ -326,7 +326,7 @@ func (q *sqlQuerier) CountAIBridgeInterceptions(ctx context.Context, arg CountAI
|
||||
|
||||
const getAIBridgeInterceptionByID = `-- name: GetAIBridgeInterceptionByID :one
|
||||
SELECT
|
||||
id, initiator_id, provider, model, started_at, metadata, ended_at, api_key_id
|
||||
id, initiator_id, provider, model, started_at, metadata, ended_at
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
WHERE
|
||||
@@ -344,14 +344,13 @@ func (q *sqlQuerier) GetAIBridgeInterceptionByID(ctx context.Context, id uuid.UU
|
||||
&i.StartedAt,
|
||||
&i.Metadata,
|
||||
&i.EndedAt,
|
||||
&i.APIKeyID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getAIBridgeInterceptions = `-- name: GetAIBridgeInterceptions :many
|
||||
SELECT
|
||||
id, initiator_id, provider, model, started_at, metadata, ended_at, api_key_id
|
||||
id, initiator_id, provider, model, started_at, metadata, ended_at
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
`
|
||||
@@ -373,7 +372,6 @@ func (q *sqlQuerier) GetAIBridgeInterceptions(ctx context.Context) ([]AIBridgeIn
|
||||
&i.StartedAt,
|
||||
&i.Metadata,
|
||||
&i.EndedAt,
|
||||
&i.APIKeyID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -519,16 +517,15 @@ func (q *sqlQuerier) GetAIBridgeUserPromptsByInterceptionID(ctx context.Context,
|
||||
|
||||
const insertAIBridgeInterception = `-- name: InsertAIBridgeInterception :one
|
||||
INSERT INTO aibridge_interceptions (
|
||||
id, api_key_id, initiator_id, provider, model, metadata, started_at
|
||||
id, initiator_id, provider, model, metadata, started_at
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5, COALESCE($6::jsonb, '{}'::jsonb), $7
|
||||
$1, $2, $3, $4, COALESCE($5::jsonb, '{}'::jsonb), $6
|
||||
)
|
||||
RETURNING id, initiator_id, provider, model, started_at, metadata, ended_at, api_key_id
|
||||
RETURNING id, initiator_id, provider, model, started_at, metadata, ended_at
|
||||
`
|
||||
|
||||
type InsertAIBridgeInterceptionParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
APIKeyID sql.NullString `db:"api_key_id" json:"api_key_id"`
|
||||
InitiatorID uuid.UUID `db:"initiator_id" json:"initiator_id"`
|
||||
Provider string `db:"provider" json:"provider"`
|
||||
Model string `db:"model" json:"model"`
|
||||
@@ -539,7 +536,6 @@ type InsertAIBridgeInterceptionParams struct {
|
||||
func (q *sqlQuerier) InsertAIBridgeInterception(ctx context.Context, arg InsertAIBridgeInterceptionParams) (AIBridgeInterception, error) {
|
||||
row := q.db.QueryRowContext(ctx, insertAIBridgeInterception,
|
||||
arg.ID,
|
||||
arg.APIKeyID,
|
||||
arg.InitiatorID,
|
||||
arg.Provider,
|
||||
arg.Model,
|
||||
@@ -555,7 +551,6 @@ func (q *sqlQuerier) InsertAIBridgeInterception(ctx context.Context, arg InsertA
|
||||
&i.StartedAt,
|
||||
&i.Metadata,
|
||||
&i.EndedAt,
|
||||
&i.APIKeyID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -694,7 +689,7 @@ func (q *sqlQuerier) InsertAIBridgeUserPrompt(ctx context.Context, arg InsertAIB
|
||||
|
||||
const listAIBridgeInterceptions = `-- name: ListAIBridgeInterceptions :many
|
||||
SELECT
|
||||
aibridge_interceptions.id, aibridge_interceptions.initiator_id, aibridge_interceptions.provider, aibridge_interceptions.model, aibridge_interceptions.started_at, aibridge_interceptions.metadata, aibridge_interceptions.ended_at, aibridge_interceptions.api_key_id,
|
||||
aibridge_interceptions.id, aibridge_interceptions.initiator_id, aibridge_interceptions.provider, aibridge_interceptions.model, aibridge_interceptions.started_at, aibridge_interceptions.metadata, aibridge_interceptions.ended_at,
|
||||
visible_users.id, visible_users.username, visible_users.name, visible_users.avatar_url
|
||||
FROM
|
||||
aibridge_interceptions
|
||||
@@ -792,7 +787,6 @@ func (q *sqlQuerier) ListAIBridgeInterceptions(ctx context.Context, arg ListAIBr
|
||||
&i.AIBridgeInterception.StartedAt,
|
||||
&i.AIBridgeInterception.Metadata,
|
||||
&i.AIBridgeInterception.EndedAt,
|
||||
&i.AIBridgeInterception.APIKeyID,
|
||||
&i.VisibleUser.ID,
|
||||
&i.VisibleUser.Username,
|
||||
&i.VisibleUser.Name,
|
||||
@@ -999,7 +993,7 @@ UPDATE aibridge_interceptions
|
||||
WHERE
|
||||
id = $2::uuid
|
||||
AND ended_at IS NULL
|
||||
RETURNING id, initiator_id, provider, model, started_at, metadata, ended_at, api_key_id
|
||||
RETURNING id, initiator_id, provider, model, started_at, metadata, ended_at
|
||||
`
|
||||
|
||||
type UpdateAIBridgeInterceptionEndedParams struct {
|
||||
@@ -1018,7 +1012,6 @@ func (q *sqlQuerier) UpdateAIBridgeInterceptionEnded(ctx context.Context, arg Up
|
||||
&i.StartedAt,
|
||||
&i.Metadata,
|
||||
&i.EndedAt,
|
||||
&i.APIKeyID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -13100,44 +13093,6 @@ func (q *sqlQuerier) GetTaskByID(ctx context.Context, id uuid.UUID) (Task, error
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTaskByOwnerIDAndName = `-- name: GetTaskByOwnerIDAndName :one
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id, owner_username, owner_name, owner_avatar_url FROM tasks_with_status
|
||||
WHERE
|
||||
owner_id = $1::uuid
|
||||
AND deleted_at IS NULL
|
||||
AND LOWER(name) = LOWER($2::text)
|
||||
`
|
||||
|
||||
type GetTaskByOwnerIDAndNameParams struct {
|
||||
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
|
||||
Name string `db:"name" json:"name"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) GetTaskByOwnerIDAndName(ctx context.Context, arg GetTaskByOwnerIDAndNameParams) (Task, error) {
|
||||
row := q.db.QueryRowContext(ctx, getTaskByOwnerIDAndName, arg.OwnerID, arg.Name)
|
||||
var i Task
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.OrganizationID,
|
||||
&i.OwnerID,
|
||||
&i.Name,
|
||||
&i.WorkspaceID,
|
||||
&i.TemplateVersionID,
|
||||
&i.TemplateParameters,
|
||||
&i.Prompt,
|
||||
&i.CreatedAt,
|
||||
&i.DeletedAt,
|
||||
&i.Status,
|
||||
&i.WorkspaceBuildNumber,
|
||||
&i.WorkspaceAgentID,
|
||||
&i.WorkspaceAppID,
|
||||
&i.OwnerUsername,
|
||||
&i.OwnerName,
|
||||
&i.OwnerAvatarUrl,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTaskByWorkspaceID = `-- name: GetTaskByWorkspaceID :one
|
||||
SELECT id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, status, workspace_build_number, workspace_agent_id, workspace_app_id, owner_username, owner_name, owner_avatar_url FROM tasks_with_status WHERE workspace_id = $1::uuid
|
||||
`
|
||||
@@ -17614,8 +17569,7 @@ const getWorkspaceAgentAndLatestBuildByAuthToken = `-- name: GetWorkspaceAgentAn
|
||||
SELECT
|
||||
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl,
|
||||
workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted,
|
||||
workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.has_external_agent, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name,
|
||||
tasks.id AS task_id
|
||||
workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.ai_task_sidebar_app_id, workspace_build_with_user.has_external_agent, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name
|
||||
FROM
|
||||
workspace_agents
|
||||
JOIN
|
||||
@@ -17630,10 +17584,6 @@ JOIN
|
||||
workspaces
|
||||
ON
|
||||
workspace_build_with_user.workspace_id = workspaces.id
|
||||
LEFT JOIN
|
||||
tasks
|
||||
ON
|
||||
tasks.workspace_id = workspaces.id
|
||||
WHERE
|
||||
-- This should only match 1 agent, so 1 returned row or 0.
|
||||
workspace_agents.auth_token = $1::uuid
|
||||
@@ -17657,7 +17607,6 @@ type GetWorkspaceAgentAndLatestBuildByAuthTokenRow struct {
|
||||
WorkspaceTable WorkspaceTable `db:"workspace_table" json:"workspace_table"`
|
||||
WorkspaceAgent WorkspaceAgent `db:"workspace_agent" json:"workspace_agent"`
|
||||
WorkspaceBuild WorkspaceBuild `db:"workspace_build" json:"workspace_build"`
|
||||
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Context, authToken uuid.UUID) (GetWorkspaceAgentAndLatestBuildByAuthTokenRow, error) {
|
||||
@@ -17732,11 +17681,11 @@ func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Cont
|
||||
&i.WorkspaceBuild.MaxDeadline,
|
||||
&i.WorkspaceBuild.TemplateVersionPresetID,
|
||||
&i.WorkspaceBuild.HasAITask,
|
||||
&i.WorkspaceBuild.AITaskSidebarAppID,
|
||||
&i.WorkspaceBuild.HasExternalAgent,
|
||||
&i.WorkspaceBuild.InitiatorByAvatarUrl,
|
||||
&i.WorkspaceBuild.InitiatorByUsername,
|
||||
&i.WorkspaceBuild.InitiatorByName,
|
||||
&i.TaskID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
@@ -20532,7 +20481,7 @@ func (q *sqlQuerier) InsertWorkspaceBuildParameters(ctx context.Context, arg Ins
|
||||
}
|
||||
|
||||
const getActiveWorkspaceBuildsByTemplateID = `-- name: GetActiveWorkspaceBuildsByTemplateID :many
|
||||
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.template_version_preset_id, wb.has_ai_task, wb.has_external_agent, wb.initiator_by_avatar_url, wb.initiator_by_username, wb.initiator_by_name
|
||||
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.template_version_preset_id, wb.has_ai_task, wb.ai_task_sidebar_app_id, wb.has_external_agent, wb.initiator_by_avatar_url, wb.initiator_by_username, wb.initiator_by_name
|
||||
FROM (
|
||||
SELECT
|
||||
workspace_id, MAX(build_number) as max_build_number
|
||||
@@ -20588,6 +20537,7 @@ func (q *sqlQuerier) GetActiveWorkspaceBuildsByTemplateID(ctx context.Context, t
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20688,7 +20638,7 @@ func (q *sqlQuerier) GetFailedWorkspaceBuildsByTemplateID(ctx context.Context, a
|
||||
|
||||
const getLatestWorkspaceBuildByWorkspaceID = `-- name: GetLatestWorkspaceBuildByWorkspaceID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -20719,6 +20669,7 @@ func (q *sqlQuerier) GetLatestWorkspaceBuildByWorkspaceID(ctx context.Context, w
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20730,7 +20681,7 @@ func (q *sqlQuerier) GetLatestWorkspaceBuildByWorkspaceID(ctx context.Context, w
|
||||
const getLatestWorkspaceBuildsByWorkspaceIDs = `-- name: GetLatestWorkspaceBuildsByWorkspaceIDs :many
|
||||
SELECT
|
||||
DISTINCT ON (workspace_id)
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -20765,6 +20716,7 @@ func (q *sqlQuerier) GetLatestWorkspaceBuildsByWorkspaceIDs(ctx context.Context,
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20785,7 +20737,7 @@ func (q *sqlQuerier) GetLatestWorkspaceBuildsByWorkspaceIDs(ctx context.Context,
|
||||
|
||||
const getWorkspaceBuildByID = `-- name: GetWorkspaceBuildByID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -20814,6 +20766,7 @@ func (q *sqlQuerier) GetWorkspaceBuildByID(ctx context.Context, id uuid.UUID) (W
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20824,7 +20777,7 @@ func (q *sqlQuerier) GetWorkspaceBuildByID(ctx context.Context, id uuid.UUID) (W
|
||||
|
||||
const getWorkspaceBuildByJobID = `-- name: GetWorkspaceBuildByJobID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -20853,6 +20806,7 @@ func (q *sqlQuerier) GetWorkspaceBuildByJobID(ctx context.Context, jobID uuid.UU
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20863,7 +20817,7 @@ func (q *sqlQuerier) GetWorkspaceBuildByJobID(ctx context.Context, jobID uuid.UU
|
||||
|
||||
const getWorkspaceBuildByWorkspaceIDAndBuildNumber = `-- name: GetWorkspaceBuildByWorkspaceIDAndBuildNumber :one
|
||||
SELECT
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -20896,6 +20850,7 @@ func (q *sqlQuerier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Co
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -20973,7 +20928,7 @@ func (q *sqlQuerier) GetWorkspaceBuildStatsByTemplates(ctx context.Context, sinc
|
||||
|
||||
const getWorkspaceBuildsByWorkspaceID = `-- name: GetWorkspaceBuildsByWorkspaceID :many
|
||||
SELECT
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name
|
||||
FROM
|
||||
workspace_build_with_user AS workspace_builds
|
||||
WHERE
|
||||
@@ -21045,6 +21000,7 @@ func (q *sqlQuerier) GetWorkspaceBuildsByWorkspaceID(ctx context.Context, arg Ge
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -21064,7 +21020,7 @@ func (q *sqlQuerier) GetWorkspaceBuildsByWorkspaceID(ctx context.Context, arg Ge
|
||||
}
|
||||
|
||||
const getWorkspaceBuildsCreatedAfter = `-- name: GetWorkspaceBuildsCreatedAfter :many
|
||||
SELECT id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name FROM workspace_build_with_user WHERE created_at > $1
|
||||
SELECT id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_task_sidebar_app_id, has_external_agent, initiator_by_avatar_url, initiator_by_username, initiator_by_name FROM workspace_build_with_user WHERE created_at > $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetWorkspaceBuildsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceBuild, error) {
|
||||
@@ -21093,6 +21049,7 @@ func (q *sqlQuerier) GetWorkspaceBuildsCreatedAfter(ctx context.Context, created
|
||||
&i.MaxDeadline,
|
||||
&i.TemplateVersionPresetID,
|
||||
&i.HasAITask,
|
||||
&i.AITaskSidebarAppID,
|
||||
&i.HasExternalAgent,
|
||||
&i.InitiatorByAvatarUrl,
|
||||
&i.InitiatorByUsername,
|
||||
@@ -21229,21 +21186,24 @@ UPDATE
|
||||
workspace_builds
|
||||
SET
|
||||
has_ai_task = $1,
|
||||
has_external_agent = $2,
|
||||
updated_at = $3::timestamptz
|
||||
WHERE id = $4::uuid
|
||||
ai_task_sidebar_app_id = $2,
|
||||
has_external_agent = $3,
|
||||
updated_at = $4::timestamptz
|
||||
WHERE id = $5::uuid
|
||||
`
|
||||
|
||||
type UpdateWorkspaceBuildFlagsByIDParams struct {
|
||||
HasAITask sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
|
||||
HasExternalAgent sql.NullBool `db:"has_external_agent" json:"has_external_agent"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
HasAITask sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
|
||||
SidebarAppID uuid.NullUUID `db:"sidebar_app_id" json:"sidebar_app_id"`
|
||||
HasExternalAgent sql.NullBool `db:"has_external_agent" json:"has_external_agent"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpdateWorkspaceBuildFlagsByID(ctx context.Context, arg UpdateWorkspaceBuildFlagsByIDParams) error {
|
||||
_, err := q.db.ExecContext(ctx, updateWorkspaceBuildFlagsByID,
|
||||
arg.HasAITask,
|
||||
arg.SidebarAppID,
|
||||
arg.HasExternalAgent,
|
||||
arg.UpdatedAt,
|
||||
arg.ID,
|
||||
@@ -22329,6 +22289,7 @@ SELECT
|
||||
latest_build.error as latest_build_error,
|
||||
latest_build.transition as latest_build_transition,
|
||||
latest_build.job_status as latest_build_status,
|
||||
latest_build.has_ai_task as latest_build_has_ai_task,
|
||||
latest_build.has_external_agent as latest_build_has_external_agent
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
@@ -22562,19 +22523,25 @@ WHERE
|
||||
(latest_build.template_version_id = template.active_version_id) = $18 :: boolean
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_ai_task, checks if this is a task workspace.
|
||||
-- Filter by has_ai_task in latest build
|
||||
AND CASE
|
||||
WHEN $19::boolean IS NOT NULL
|
||||
THEN $19::boolean = EXISTS (
|
||||
SELECT
|
||||
1
|
||||
FROM
|
||||
tasks
|
||||
WHERE
|
||||
-- Consider all tasks, deleting a task does not turn the
|
||||
-- workspace into a non-task workspace.
|
||||
tasks.workspace_id = workspaces.id
|
||||
)
|
||||
WHEN $19 :: boolean IS NOT NULL THEN
|
||||
(COALESCE(latest_build.has_ai_task, false) OR (
|
||||
-- If the build has no AI task, it means that the provisioner job is in progress
|
||||
-- and we don't know if it has an AI task yet. In this case, we optimistically
|
||||
-- assume that it has an AI task if the AI Prompt parameter is not empty. This
|
||||
-- lets the AI Task frontend spawn a task and see it immediately after instead of
|
||||
-- having to wait for the build to complete.
|
||||
latest_build.has_ai_task IS NULL AND
|
||||
latest_build.completed_at IS NULL AND
|
||||
EXISTS (
|
||||
SELECT 1
|
||||
FROM workspace_build_parameters
|
||||
WHERE workspace_build_parameters.workspace_build_id = latest_build.id
|
||||
AND workspace_build_parameters.name = 'AI Prompt'
|
||||
AND workspace_build_parameters.value != ''
|
||||
)
|
||||
)) = ($19 :: boolean)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_external_agent in latest build
|
||||
@@ -22605,7 +22572,7 @@ WHERE
|
||||
-- @authorize_filter
|
||||
), filtered_workspaces_order AS (
|
||||
SELECT
|
||||
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.task_id, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_external_agent
|
||||
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.task_id, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task, fw.latest_build_has_external_agent
|
||||
FROM
|
||||
filtered_workspaces fw
|
||||
ORDER BY
|
||||
@@ -22626,7 +22593,7 @@ WHERE
|
||||
$25
|
||||
), filtered_workspaces_order_with_summary AS (
|
||||
SELECT
|
||||
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.task_id, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_external_agent
|
||||
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.task_id, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task, fwo.latest_build_has_external_agent
|
||||
FROM
|
||||
filtered_workspaces_order fwo
|
||||
-- Return a technical summary row with total count of workspaces.
|
||||
@@ -22671,6 +22638,7 @@ WHERE
|
||||
'', -- latest_build_error
|
||||
'start'::workspace_transition, -- latest_build_transition
|
||||
'unknown'::provisioner_job_status, -- latest_build_status
|
||||
false, -- latest_build_has_ai_task
|
||||
false -- latest_build_has_external_agent
|
||||
WHERE
|
||||
$27 :: boolean = true
|
||||
@@ -22681,7 +22649,7 @@ WHERE
|
||||
filtered_workspaces
|
||||
)
|
||||
SELECT
|
||||
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.task_id, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_external_agent,
|
||||
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.task_id, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, fwos.latest_build_has_external_agent,
|
||||
tc.count
|
||||
FROM
|
||||
filtered_workspaces_order_with_summary fwos
|
||||
@@ -22757,6 +22725,7 @@ type GetWorkspacesRow struct {
|
||||
LatestBuildError sql.NullString `db:"latest_build_error" json:"latest_build_error"`
|
||||
LatestBuildTransition WorkspaceTransition `db:"latest_build_transition" json:"latest_build_transition"`
|
||||
LatestBuildStatus ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"`
|
||||
LatestBuildHasAITask sql.NullBool `db:"latest_build_has_ai_task" json:"latest_build_has_ai_task"`
|
||||
LatestBuildHasExternalAgent sql.NullBool `db:"latest_build_has_external_agent" json:"latest_build_has_external_agent"`
|
||||
Count int64 `db:"count" json:"count"`
|
||||
}
|
||||
@@ -22839,6 +22808,7 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams)
|
||||
&i.LatestBuildError,
|
||||
&i.LatestBuildTransition,
|
||||
&i.LatestBuildStatus,
|
||||
&i.LatestBuildHasAITask,
|
||||
&i.LatestBuildHasExternalAgent,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
-- name: InsertAIBridgeInterception :one
|
||||
INSERT INTO aibridge_interceptions (
|
||||
id, api_key_id, initiator_id, provider, model, metadata, started_at
|
||||
id, initiator_id, provider, model, metadata, started_at
|
||||
) VALUES (
|
||||
@id, @api_key_id, @initiator_id, @provider, @model, COALESCE(@metadata::jsonb, '{}'::jsonb), @started_at
|
||||
@id, @initiator_id, @provider, @model, COALESCE(@metadata::jsonb, '{}'::jsonb), @started_at
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
|
||||
@@ -41,13 +41,6 @@ SELECT * FROM tasks_with_status WHERE id = @id::uuid;
|
||||
-- name: GetTaskByWorkspaceID :one
|
||||
SELECT * FROM tasks_with_status WHERE workspace_id = @workspace_id::uuid;
|
||||
|
||||
-- name: GetTaskByOwnerIDAndName :one
|
||||
SELECT * FROM tasks_with_status
|
||||
WHERE
|
||||
owner_id = @owner_id::uuid
|
||||
AND deleted_at IS NULL
|
||||
AND LOWER(name) = LOWER(@name::text);
|
||||
|
||||
-- name: ListTasks :many
|
||||
SELECT * FROM tasks_with_status tws
|
||||
WHERE tws.deleted_at IS NULL
|
||||
|
||||
@@ -285,8 +285,7 @@ WHERE
|
||||
SELECT
|
||||
sqlc.embed(workspaces),
|
||||
sqlc.embed(workspace_agents),
|
||||
sqlc.embed(workspace_build_with_user),
|
||||
tasks.id AS task_id
|
||||
sqlc.embed(workspace_build_with_user)
|
||||
FROM
|
||||
workspace_agents
|
||||
JOIN
|
||||
@@ -301,10 +300,6 @@ JOIN
|
||||
workspaces
|
||||
ON
|
||||
workspace_build_with_user.workspace_id = workspaces.id
|
||||
LEFT JOIN
|
||||
tasks
|
||||
ON
|
||||
tasks.workspace_id = workspaces.id
|
||||
WHERE
|
||||
-- This should only match 1 agent, so 1 returned row or 0.
|
||||
workspace_agents.auth_token = @auth_token::uuid
|
||||
|
||||
@@ -240,6 +240,7 @@ UPDATE
|
||||
workspace_builds
|
||||
SET
|
||||
has_ai_task = @has_ai_task,
|
||||
ai_task_sidebar_app_id = @sidebar_app_id,
|
||||
has_external_agent = @has_external_agent,
|
||||
updated_at = @updated_at::timestamptz
|
||||
WHERE id = @id::uuid;
|
||||
|
||||
@@ -117,6 +117,7 @@ SELECT
|
||||
latest_build.error as latest_build_error,
|
||||
latest_build.transition as latest_build_transition,
|
||||
latest_build.job_status as latest_build_status,
|
||||
latest_build.has_ai_task as latest_build_has_ai_task,
|
||||
latest_build.has_external_agent as latest_build_has_external_agent
|
||||
FROM
|
||||
workspaces_expanded as workspaces
|
||||
@@ -350,19 +351,25 @@ WHERE
|
||||
(latest_build.template_version_id = template.active_version_id) = sqlc.narg('using_active') :: boolean
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_ai_task, checks if this is a task workspace.
|
||||
-- Filter by has_ai_task in latest build
|
||||
AND CASE
|
||||
WHEN sqlc.narg('has_ai_task')::boolean IS NOT NULL
|
||||
THEN sqlc.narg('has_ai_task')::boolean = EXISTS (
|
||||
SELECT
|
||||
1
|
||||
FROM
|
||||
tasks
|
||||
WHERE
|
||||
-- Consider all tasks, deleting a task does not turn the
|
||||
-- workspace into a non-task workspace.
|
||||
tasks.workspace_id = workspaces.id
|
||||
)
|
||||
WHEN sqlc.narg('has_ai_task') :: boolean IS NOT NULL THEN
|
||||
(COALESCE(latest_build.has_ai_task, false) OR (
|
||||
-- If the build has no AI task, it means that the provisioner job is in progress
|
||||
-- and we don't know if it has an AI task yet. In this case, we optimistically
|
||||
-- assume that it has an AI task if the AI Prompt parameter is not empty. This
|
||||
-- lets the AI Task frontend spawn a task and see it immediately after instead of
|
||||
-- having to wait for the build to complete.
|
||||
latest_build.has_ai_task IS NULL AND
|
||||
latest_build.completed_at IS NULL AND
|
||||
EXISTS (
|
||||
SELECT 1
|
||||
FROM workspace_build_parameters
|
||||
WHERE workspace_build_parameters.workspace_build_id = latest_build.id
|
||||
AND workspace_build_parameters.name = 'AI Prompt'
|
||||
AND workspace_build_parameters.value != ''
|
||||
)
|
||||
)) = (sqlc.narg('has_ai_task') :: boolean)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by has_external_agent in latest build
|
||||
@@ -459,6 +466,7 @@ WHERE
|
||||
'', -- latest_build_error
|
||||
'start'::workspace_transition, -- latest_build_transition
|
||||
'unknown'::provisioner_job_status, -- latest_build_status
|
||||
false, -- latest_build_has_ai_task
|
||||
false -- latest_build_has_external_agent
|
||||
WHERE
|
||||
@with_summary :: boolean = true
|
||||
|
||||
@@ -2,14 +2,8 @@ package httpmw
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
@@ -29,34 +23,16 @@ func TaskParam(r *http.Request) database.Task {
|
||||
return task
|
||||
}
|
||||
|
||||
// ExtractTaskParam grabs a task from the "task" URL parameter.
|
||||
// It supports two lookup strategies:
|
||||
// 1. Task UUID (primary)
|
||||
// 2. Task name scoped to owner (secondary)
|
||||
//
|
||||
// This middleware depends on ExtractOrganizationMembersParam being in the chain
|
||||
// to provide the owner context for name-based lookups.
|
||||
// ExtractTaskParam grabs a task from the "task" URL parameter by UUID.
|
||||
func ExtractTaskParam(db database.Store) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
// Get the task parameter value. We can't use ParseUUIDParam here because
|
||||
// we need to support non-UUID values (task names) and
|
||||
// attempt all lookup strategies.
|
||||
taskParam := chi.URLParam(r, "task")
|
||||
if taskParam == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "\"task\" must be provided.",
|
||||
})
|
||||
taskID, parsed := ParseUUIDParam(rw, r, "task")
|
||||
if !parsed {
|
||||
return
|
||||
}
|
||||
|
||||
// Get owner from OrganizationMembersParam middleware for name-based lookups
|
||||
members := OrganizationMembersParam(r)
|
||||
ownerID := members.UserID()
|
||||
|
||||
task, err := fetchTaskWithFallback(ctx, db, taskParam, ownerID)
|
||||
task, err := db.GetTaskByID(ctx, taskID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
@@ -72,38 +48,10 @@ func ExtractTaskParam(db database.Store) func(http.Handler) http.Handler {
|
||||
ctx = context.WithValue(ctx, taskParamContextKey{}, task)
|
||||
|
||||
if rlogger := loggermw.RequestLoggerFromContext(ctx); rlogger != nil {
|
||||
rlogger.WithFields(
|
||||
slog.F("task_id", task.ID),
|
||||
slog.F("task_name", task.Name),
|
||||
)
|
||||
rlogger.WithFields(slog.F("task_id", task.ID), slog.F("task_name", task.Name))
|
||||
}
|
||||
|
||||
next.ServeHTTP(rw, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func fetchTaskWithFallback(ctx context.Context, db database.Store, taskParam string, ownerID uuid.UUID) (database.Task, error) {
|
||||
// Attempt to first lookup the task by UUID.
|
||||
taskID, err := uuid.Parse(taskParam)
|
||||
if err == nil {
|
||||
task, err := db.GetTaskByID(ctx, taskID)
|
||||
if err == nil {
|
||||
return task, nil
|
||||
}
|
||||
// There may be a task named with a valid UUID. Fall back to name lookup in this case.
|
||||
if !errors.Is(err, sql.ErrNoRows) {
|
||||
return database.Task{}, xerrors.Errorf("fetch task by uuid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// taskParam not a valid UUID, OR valid UUID but not found, so attempt lookup by name.
|
||||
task, err := db.GetTaskByOwnerIDAndName(ctx, database.GetTaskByOwnerIDAndNameParams{
|
||||
OwnerID: ownerID,
|
||||
Name: taskParam,
|
||||
})
|
||||
if err != nil {
|
||||
return database.Task{}, xerrors.Errorf("fetch task by name: %w", err)
|
||||
}
|
||||
return task, nil
|
||||
}
|
||||
|
||||
+55
-201
@@ -4,119 +4,35 @@ import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func TestTaskParam(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create all fixtures once - they're only read, never modified
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
_, token := dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
})
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: tpl.ID,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: workspace.ID, Valid: true},
|
||||
Prompt: "test prompt",
|
||||
})
|
||||
workspaceNoTask := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
taskFoundByUUID := dbgen.Task(t, db, database.TaskTable{
|
||||
Name: "found-by-uuid",
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: workspace.ID, Valid: true},
|
||||
Prompt: "test prompt",
|
||||
})
|
||||
// To test precedence of UUID over name, we create another task with the same name as the UUID task
|
||||
_ = dbgen.Task(t, db, database.TaskTable{
|
||||
Name: taskFoundByUUID.ID.String(),
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: workspace.ID, Valid: true},
|
||||
Prompt: "test prompt",
|
||||
})
|
||||
workspaceSharedName := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
Name: "shared-name",
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
// We create a task with the same name as the workspace shared name.
|
||||
_ = dbgen.Task(t, db, database.TaskTable{
|
||||
Name: "task-different-name",
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: workspaceSharedName.ID, Valid: true},
|
||||
Prompt: "test prompt",
|
||||
})
|
||||
setup := func(db database.Store) (*http.Request, database.User) {
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
_, token := dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
})
|
||||
|
||||
makeRequest := func(userID uuid.UUID, sessionToken string) *http.Request {
|
||||
r := httptest.NewRequest("GET", "/", nil)
|
||||
r.Header.Set(codersdk.SessionTokenHeader, sessionToken)
|
||||
r.Header.Set(codersdk.SessionTokenHeader, token)
|
||||
|
||||
ctx := chi.NewRouteContext()
|
||||
ctx.URLParams.Add("user", userID.String())
|
||||
ctx.URLParams.Add("user", "me")
|
||||
r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, ctx))
|
||||
return r
|
||||
}
|
||||
|
||||
makeRouter := func(handler http.HandlerFunc) chi.Router {
|
||||
rtr := chi.NewRouter()
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractOrganizationMembersParam(db, func(r *http.Request, _ policy.Action, _ rbac.Objecter) bool {
|
||||
return true
|
||||
}),
|
||||
httpmw.ExtractTaskParam(db),
|
||||
)
|
||||
rtr.Get("/", handler)
|
||||
return rtr
|
||||
return r, user
|
||||
}
|
||||
|
||||
t.Run("None", func(t *testing.T) {
|
||||
@@ -124,11 +40,8 @@ func TestTaskParam(t *testing.T) {
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
rtr := chi.NewRouter()
|
||||
rtr.Use(httpmw.ExtractTaskParam(db))
|
||||
rtr.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Fail(t, "this should never get called")
|
||||
})
|
||||
r := httptest.NewRequest("GET", "/", nil)
|
||||
r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, chi.NewRouteContext()))
|
||||
rtr.Get("/", nil)
|
||||
r, _ := setup(db)
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
@@ -139,10 +52,11 @@ func TestTaskParam(t *testing.T) {
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Fail(t, "this should never get called")
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
rtr := chi.NewRouter()
|
||||
rtr.Use(httpmw.ExtractTaskParam(db))
|
||||
rtr.Get("/", nil)
|
||||
r, _ := setup(db)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", uuid.NewString())
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
@@ -154,11 +68,47 @@ func TestTaskParam(t *testing.T) {
|
||||
|
||||
t.Run("Found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
foundTask := httpmw.TaskParam(r)
|
||||
assert.Equal(t, task.ID.String(), foundTask.ID.String())
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
rtr := chi.NewRouter()
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractTaskParam(db),
|
||||
)
|
||||
rtr.Get("/", func(rw http.ResponseWriter, r *http.Request) {
|
||||
_ = httpmw.TaskParam(r)
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
})
|
||||
r, user := setup(db)
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: tpl.ID,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
Name: "test-workspace",
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
Name: "test-task",
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
WorkspaceID: uuid.NullUUID{UUID: workspace.ID, Valid: true},
|
||||
Prompt: "test prompt",
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", task.ID.String())
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
@@ -167,100 +117,4 @@ func TestTaskParam(t *testing.T) {
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusOK, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("FoundByTaskName", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
foundTask := httpmw.TaskParam(r)
|
||||
assert.Equal(t, task.ID.String(), foundTask.ID.String())
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", task.Name)
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusOK, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("NotFoundByWorkspaceName", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Fail(t, "this should never get called")
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", workspace.Name)
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("CaseInsensitiveTaskName", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
foundTask := httpmw.TaskParam(r)
|
||||
assert.Equal(t, task.ID.String(), foundTask.ID.String())
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
// Look up with different case
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", strings.ToUpper(task.Name))
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusOK, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("UUIDTakesPrecedence", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
foundTask := httpmw.TaskParam(r)
|
||||
assert.Equal(t, taskFoundByUUID.ID.String(), foundTask.ID.String())
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
// Look up by UUID - should find the first task, not the one named with the UUID
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", taskFoundByUUID.ID.String())
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusOK, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("NotFoundWhenNoMatch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Fail(t, "this should never get called")
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", "nonexistent-name")
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("WorkspaceWithoutTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rtr := makeRouter(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Fail(t, "this should never get called")
|
||||
})
|
||||
r := makeRequest(user.ID, token)
|
||||
// Look up by workspace name, but workspace has no task
|
||||
chi.RouteContext(r.Context()).URLParams.Add("task", workspaceNoTask.Name)
|
||||
rw := httptest.NewRecorder()
|
||||
rtr.ServeHTTP(rw, r)
|
||||
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -118,7 +118,6 @@ func ExtractWorkspaceAgentAndLatestBuild(opts ExtractWorkspaceAgentAndLatestBuil
|
||||
OwnerID: row.WorkspaceTable.OwnerID,
|
||||
TemplateID: row.WorkspaceTable.TemplateID,
|
||||
VersionID: row.WorkspaceBuild.TemplateVersionID,
|
||||
TaskID: row.TaskID,
|
||||
BlockUserData: row.WorkspaceAgent.APIKeyScope == database.AgentKeyScopeEnumNoUserData,
|
||||
}),
|
||||
)
|
||||
|
||||
@@ -18,7 +18,6 @@ func GetAuthorizationServerMetadata(accessURL *url.URL) http.HandlerFunc {
|
||||
AuthorizationEndpoint: accessURL.JoinPath("/oauth2/authorize").String(),
|
||||
TokenEndpoint: accessURL.JoinPath("/oauth2/tokens").String(),
|
||||
RegistrationEndpoint: accessURL.JoinPath("/oauth2/register").String(), // RFC 7591
|
||||
RevocationEndpoint: accessURL.JoinPath("/oauth2/revoke").String(), // RFC 7009
|
||||
ResponseTypesSupported: []string{"code"},
|
||||
GrantTypesSupported: []string{"authorization_code", "refresh_token"},
|
||||
CodeChallengeMethodsSupported: []string{"S256"},
|
||||
|
||||
@@ -2006,12 +2006,10 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
hasAITask bool
|
||||
unknownAppID string
|
||||
taskAppID uuid.NullUUID
|
||||
taskAgentID uuid.NullUUID
|
||||
)
|
||||
var taskAppID uuid.NullUUID
|
||||
var taskAgentID uuid.NullUUID
|
||||
var hasAITask bool
|
||||
var warnUnknownTaskAppID bool
|
||||
if tasks := jobType.WorkspaceBuild.GetAiTasks(); len(tasks) > 0 {
|
||||
hasAITask = true
|
||||
task := tasks[0]
|
||||
@@ -2028,29 +2026,59 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
}
|
||||
|
||||
if !slices.Contains(appIDs, appID) {
|
||||
unknownAppID = appID
|
||||
hasAITask = false
|
||||
} else {
|
||||
// Only parse for valid app and agent to avoid fk violation.
|
||||
id, err := uuid.Parse(appID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse app id: %w", err)
|
||||
}
|
||||
taskAppID = uuid.NullUUID{UUID: id, Valid: true}
|
||||
warnUnknownTaskAppID = true
|
||||
}
|
||||
|
||||
agentID, ok := agentIDByAppID[appID]
|
||||
taskAgentID = uuid.NullUUID{UUID: agentID, Valid: ok}
|
||||
id, err := uuid.Parse(appID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse app id: %w", err)
|
||||
}
|
||||
|
||||
taskAppID = uuid.NullUUID{UUID: id, Valid: true}
|
||||
|
||||
agentID, ok := agentIDByAppID[appID]
|
||||
taskAgentID = uuid.NullUUID{UUID: agentID, Valid: ok}
|
||||
}
|
||||
|
||||
// This is a hacky workaround for the issue with tasks 'disappearing' on stop:
|
||||
// reuse has_ai_task and sidebar_app_id from the previous build.
|
||||
// This workaround should be removed as soon as possible.
|
||||
if workspaceBuild.Transition == database.WorkspaceTransitionStop && workspaceBuild.BuildNumber > 1 {
|
||||
if prevBuild, err := s.Database.GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx, database.GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams{
|
||||
WorkspaceID: workspaceBuild.WorkspaceID,
|
||||
BuildNumber: workspaceBuild.BuildNumber - 1,
|
||||
}); err == nil {
|
||||
hasAITask = prevBuild.HasAITask.Bool
|
||||
taskAppID = prevBuild.AITaskSidebarAppID
|
||||
warnUnknownTaskAppID = false
|
||||
s.Logger.Debug(ctx, "task workaround: reused has_ai_task and app_id from previous build to keep track of task",
|
||||
slog.F("job_id", job.ID.String()),
|
||||
slog.F("build_number", prevBuild.BuildNumber),
|
||||
slog.F("workspace_id", workspace.ID),
|
||||
slog.F("workspace_build_id", workspaceBuild.ID),
|
||||
slog.F("transition", string(workspaceBuild.Transition)),
|
||||
slog.F("sidebar_app_id", taskAppID.UUID),
|
||||
slog.F("has_ai_task", hasAITask),
|
||||
)
|
||||
} else {
|
||||
s.Logger.Error(ctx, "task workaround: tracking via has_ai_task and app_id from previous build failed",
|
||||
slog.Error(err),
|
||||
slog.F("job_id", job.ID.String()),
|
||||
slog.F("workspace_id", workspace.ID),
|
||||
slog.F("workspace_build_id", workspaceBuild.ID),
|
||||
slog.F("transition", string(workspaceBuild.Transition)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if unknownAppID != "" && workspaceBuild.Transition == database.WorkspaceTransitionStart {
|
||||
if warnUnknownTaskAppID {
|
||||
// Ref: https://github.com/coder/coder/issues/18776
|
||||
// This can happen for a number of reasons:
|
||||
// 1. Misconfigured template
|
||||
// 2. Count=0 on the agent due to stop transition, meaning the associated coder_app was not inserted.
|
||||
// Failing the build at this point is not ideal, so log a warning instead.
|
||||
s.Logger.Warn(ctx, "unknown ai_task_app_id",
|
||||
slog.F("ai_task_app_id", unknownAppID),
|
||||
slog.F("ai_task_app_id", taskAppID.UUID.String()),
|
||||
slog.F("job_id", job.ID.String()),
|
||||
slog.F("workspace_id", workspace.ID),
|
||||
slog.F("workspace_build_id", workspaceBuild.ID),
|
||||
@@ -2077,6 +2105,9 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
slog.F("transition", string(workspaceBuild.Transition)),
|
||||
)
|
||||
}
|
||||
// Important: reset hasAITask and sidebarAppID so that we don't run into a fk constraint violation.
|
||||
hasAITask = false
|
||||
taskAppID = uuid.NullUUID{}
|
||||
}
|
||||
|
||||
if hasAITask && workspaceBuild.Transition == database.WorkspaceTransitionStart {
|
||||
@@ -2093,6 +2124,14 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
}
|
||||
}
|
||||
|
||||
hasExternalAgent := false
|
||||
for _, resource := range jobType.WorkspaceBuild.Resources {
|
||||
if resource.Type == "coder_external_agent" {
|
||||
hasExternalAgent = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if task, err := db.GetTaskByWorkspaceID(ctx, workspace.ID); err == nil {
|
||||
// Irrespective of whether the agent or sidebar app is present,
|
||||
// perform the upsert to ensure a link between the task and
|
||||
@@ -2114,9 +2153,8 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
return xerrors.Errorf("get task by workspace id: %w", err)
|
||||
}
|
||||
|
||||
_, hasExternalAgent := slice.Find(jobType.WorkspaceBuild.Resources, func(resource *sdkproto.Resource) bool {
|
||||
return resource.Type == "coder_external_agent"
|
||||
})
|
||||
// Regardless of whether there is an AI task or not, update the field to indicate one way or the other since it
|
||||
// always defaults to nil. ONLY if has_ai_task=true MUST ai_task_sidebar_app_id be set.
|
||||
if err := db.UpdateWorkspaceBuildFlagsByID(ctx, database.UpdateWorkspaceBuildFlagsByIDParams{
|
||||
ID: workspaceBuild.ID,
|
||||
HasAITask: sql.NullBool{
|
||||
@@ -2127,7 +2165,8 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
|
||||
Bool: hasExternalAgent,
|
||||
Valid: true,
|
||||
},
|
||||
UpdatedAt: now,
|
||||
SidebarAppID: taskAppID,
|
||||
UpdatedAt: now,
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("update workspace build ai tasks and external agent flag: %w", err)
|
||||
}
|
||||
|
||||
@@ -2864,12 +2864,11 @@ func TestCompleteJob(t *testing.T) {
|
||||
input *proto.CompletedJob_WorkspaceBuild
|
||||
isTask bool
|
||||
expectTaskStatus database.TaskStatus
|
||||
expectAppID uuid.NullUUID
|
||||
expectHasAiTask bool
|
||||
expectUsageEvent bool
|
||||
}
|
||||
|
||||
sidebarAppID := uuid.New()
|
||||
sidebarAppID := uuid.NewString()
|
||||
for _, tc := range []testcase{
|
||||
{
|
||||
name: "has_ai_task is false by default",
|
||||
@@ -2884,45 +2883,12 @@ func TestCompleteJob(t *testing.T) {
|
||||
{
|
||||
name: "has_ai_task is set to true",
|
||||
transition: database.WorkspaceTransitionStart,
|
||||
input: &proto.CompletedJob_WorkspaceBuild{
|
||||
AiTasks: []*sdkproto.AITask{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
AppId: sidebarAppID.String(),
|
||||
},
|
||||
},
|
||||
Resources: []*sdkproto.Resource{
|
||||
{
|
||||
Agents: []*sdkproto.Agent{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
Name: "a",
|
||||
Apps: []*sdkproto.App{
|
||||
{
|
||||
Id: sidebarAppID.String(),
|
||||
Slug: "test-app",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
isTask: true,
|
||||
expectTaskStatus: database.TaskStatusInitializing,
|
||||
expectAppID: uuid.NullUUID{UUID: sidebarAppID, Valid: true},
|
||||
expectHasAiTask: true,
|
||||
expectUsageEvent: true,
|
||||
},
|
||||
{
|
||||
name: "has_ai_task is set to true, with sidebar app id",
|
||||
transition: database.WorkspaceTransitionStart,
|
||||
input: &proto.CompletedJob_WorkspaceBuild{
|
||||
AiTasks: []*sdkproto.AITask{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
SidebarApp: &sdkproto.AITaskSidebarApp{
|
||||
Id: sidebarAppID.String(),
|
||||
Id: sidebarAppID,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -2934,7 +2900,7 @@ func TestCompleteJob(t *testing.T) {
|
||||
Name: "a",
|
||||
Apps: []*sdkproto.App{
|
||||
{
|
||||
Id: sidebarAppID.String(),
|
||||
Id: sidebarAppID,
|
||||
Slug: "test-app",
|
||||
},
|
||||
},
|
||||
@@ -2945,7 +2911,6 @@ func TestCompleteJob(t *testing.T) {
|
||||
},
|
||||
isTask: true,
|
||||
expectTaskStatus: database.TaskStatusInitializing,
|
||||
expectAppID: uuid.NullUUID{UUID: sidebarAppID, Valid: true},
|
||||
expectHasAiTask: true,
|
||||
expectUsageEvent: true,
|
||||
},
|
||||
@@ -2957,9 +2922,10 @@ func TestCompleteJob(t *testing.T) {
|
||||
AiTasks: []*sdkproto.AITask{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
// Non-existing app ID would previously trigger a FK violation.
|
||||
// Now it should just be ignored.
|
||||
AppId: sidebarAppID.String(),
|
||||
SidebarApp: &sdkproto.AITaskSidebarApp{
|
||||
// Non-existing app ID would previously trigger a FK violation.
|
||||
Id: uuid.NewString(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -2974,8 +2940,10 @@ func TestCompleteJob(t *testing.T) {
|
||||
input: &proto.CompletedJob_WorkspaceBuild{
|
||||
AiTasks: []*sdkproto.AITask{
|
||||
{
|
||||
Id: uuid.NewString(),
|
||||
AppId: sidebarAppID.String(),
|
||||
Id: uuid.NewString(),
|
||||
SidebarApp: &sdkproto.AITaskSidebarApp{
|
||||
Id: sidebarAppID,
|
||||
},
|
||||
},
|
||||
},
|
||||
Resources: []*sdkproto.Resource{
|
||||
@@ -2986,7 +2954,7 @@ func TestCompleteJob(t *testing.T) {
|
||||
Name: "a",
|
||||
Apps: []*sdkproto.App{
|
||||
{
|
||||
Id: sidebarAppID.String(),
|
||||
Id: sidebarAppID,
|
||||
Slug: "test-app",
|
||||
},
|
||||
},
|
||||
@@ -2997,7 +2965,6 @@ func TestCompleteJob(t *testing.T) {
|
||||
},
|
||||
isTask: true,
|
||||
expectTaskStatus: database.TaskStatusPaused,
|
||||
expectAppID: uuid.NullUUID{UUID: sidebarAppID, Valid: true},
|
||||
expectHasAiTask: true,
|
||||
expectUsageEvent: false,
|
||||
},
|
||||
@@ -3011,7 +2978,7 @@ func TestCompleteJob(t *testing.T) {
|
||||
},
|
||||
isTask: true,
|
||||
expectTaskStatus: database.TaskStatusPaused,
|
||||
expectHasAiTask: false, // We no longer inherit this from the previous build.
|
||||
expectHasAiTask: true,
|
||||
expectUsageEvent: false,
|
||||
},
|
||||
} {
|
||||
@@ -3125,15 +3092,15 @@ func TestCompleteJob(t *testing.T) {
|
||||
require.True(t, build.HasAITask.Valid) // We ALWAYS expect a value to be set, therefore not nil, i.e. valid = true.
|
||||
require.Equal(t, tc.expectHasAiTask, build.HasAITask.Bool)
|
||||
|
||||
task, err := db.GetTaskByID(ctx, genTask.ID)
|
||||
if tc.isTask {
|
||||
task, err := db.GetTaskByID(ctx, genTask.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectTaskStatus, task.Status)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, tc.expectAppID, task.WorkspaceAppID)
|
||||
if tc.expectHasAiTask && build.Transition != database.WorkspaceTransitionStop {
|
||||
require.Equal(t, sidebarAppID, build.AITaskSidebarAppID.UUID.String())
|
||||
}
|
||||
|
||||
if tc.expectUsageEvent {
|
||||
// Check that a usage event was collected.
|
||||
@@ -4124,7 +4091,6 @@ func TestServer_ExpirePrebuildsSessionToken(t *testing.T) {
|
||||
job, err := fs.waitForJob()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
require.NotNil(t, job.Type, "acquired job type was nil?!")
|
||||
workspaceBuildJob := job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild
|
||||
require.NotNil(t, workspaceBuildJob.Metadata)
|
||||
|
||||
@@ -4447,18 +4413,19 @@ func seedPreviousWorkspaceStartWithAITask(ctx context.Context, t testing.TB, db
|
||||
agt := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ResourceID: res.ID,
|
||||
})
|
||||
_ = dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
|
||||
wa := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
|
||||
AgentID: agt.ID,
|
||||
})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
BuildNumber: 1,
|
||||
HasAITask: sql.NullBool{Valid: true, Bool: true},
|
||||
ID: w.ID,
|
||||
InitiatorID: w.OwnerID,
|
||||
JobID: prevJob.ID,
|
||||
TemplateVersionID: tvs[0].ID,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
WorkspaceID: w.ID,
|
||||
BuildNumber: 1,
|
||||
HasAITask: sql.NullBool{Valid: true, Bool: true},
|
||||
AITaskSidebarAppID: uuid.NullUUID{Valid: true, UUID: wa.ID},
|
||||
ID: w.ID,
|
||||
InitiatorID: w.OwnerID,
|
||||
JobID: prevJob.ID,
|
||||
TemplateVersionID: tvs[0].ID,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
WorkspaceID: w.ID,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
+2
-12
@@ -18,7 +18,6 @@ type WorkspaceAgentScopeParams struct {
|
||||
OwnerID uuid.UUID
|
||||
TemplateID uuid.UUID
|
||||
VersionID uuid.UUID
|
||||
TaskID uuid.NullUUID
|
||||
BlockUserData bool
|
||||
}
|
||||
|
||||
@@ -43,15 +42,6 @@ func WorkspaceAgentScope(params WorkspaceAgentScopeParams) Scope {
|
||||
panic("failed to expand scope, this should never happen")
|
||||
}
|
||||
|
||||
// Include task in the allow list if the workspace has an associated task.
|
||||
var extraAllowList []AllowListElement
|
||||
if params.TaskID.Valid {
|
||||
extraAllowList = append(extraAllowList, AllowListElement{
|
||||
Type: ResourceTask.Type,
|
||||
ID: params.TaskID.UUID.String(),
|
||||
})
|
||||
}
|
||||
|
||||
return Scope{
|
||||
// TODO: We want to limit the role too to be extra safe.
|
||||
// Even though the allowlist blocks anything else, it is still good
|
||||
@@ -62,12 +52,12 @@ func WorkspaceAgentScope(params WorkspaceAgentScopeParams) Scope {
|
||||
// Limit the agent to only be able to access the singular workspace and
|
||||
// the template/version it was created from. Add additional resources here
|
||||
// as needed, but do not add more workspace or template resource ids.
|
||||
AllowIDList: append([]AllowListElement{
|
||||
AllowIDList: []AllowListElement{
|
||||
{Type: ResourceWorkspace.Type, ID: params.WorkspaceID.String()},
|
||||
{Type: ResourceTemplate.Type, ID: params.TemplateID.String()},
|
||||
{Type: ResourceTemplate.Type, ID: params.VersionID.String()},
|
||||
{Type: ResourceUser.Type, ID: params.OwnerID.String()},
|
||||
}, extraAllowList...),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -145,12 +145,13 @@ func TestTelemetry(t *testing.T) {
|
||||
AgentID: taskWsAgent.ID,
|
||||
})
|
||||
taskWB := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
Reason: database.BuildReasonAutostart,
|
||||
WorkspaceID: taskWs.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
JobID: taskJob.ID,
|
||||
HasAITask: sql.NullBool{Valid: true, Bool: true},
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
Reason: database.BuildReasonAutostart,
|
||||
WorkspaceID: taskWs.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
JobID: taskJob.ID,
|
||||
HasAITask: sql.NullBool{Valid: true, Bool: true},
|
||||
AITaskSidebarAppID: uuid.NullUUID{Valid: true, UUID: taskWsApp.ID},
|
||||
})
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OwnerID: user.ID,
|
||||
|
||||
+11
-3
@@ -1130,11 +1130,19 @@ func (api *API) convertTemplate(
|
||||
|
||||
// findTemplateAdmins fetches all users with template admin permission including owners.
|
||||
func findTemplateAdmins(ctx context.Context, store database.Store) ([]database.GetUsersRow, error) {
|
||||
templateAdmins, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleTemplateAdmin, codersdk.RoleOwner},
|
||||
// Notice: we can't scrape the user information in parallel as pq
|
||||
// fails with: unexpected describe rows response: 'D'
|
||||
owners, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleOwner},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get owners: %w", err)
|
||||
}
|
||||
return templateAdmins, nil
|
||||
templateAdmins, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleTemplateAdmin},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get template admins: %w", err)
|
||||
}
|
||||
return append(owners, templateAdmins...), nil
|
||||
}
|
||||
|
||||
+11
-3
@@ -1537,13 +1537,21 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
|
||||
|
||||
// findUserAdmins fetches all users with user admin permission including owners.
|
||||
func findUserAdmins(ctx context.Context, store database.Store) ([]database.GetUsersRow, error) {
|
||||
userAdmins, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleOwner, codersdk.RoleUserAdmin},
|
||||
// Notice: we can't scrape the user information in parallel as pq
|
||||
// fails with: unexpected describe rows response: 'D'
|
||||
owners, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleOwner},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get owners: %w", err)
|
||||
}
|
||||
return userAdmins, nil
|
||||
userAdmins, err := store.GetUsers(ctx, database.GetUsersParams{
|
||||
RbacRole: []string{codersdk.RoleUserAdmin},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get user admins: %w", err)
|
||||
}
|
||||
return append(owners, userAdmins...), nil
|
||||
}
|
||||
|
||||
func convertUsers(users []database.User, organizationIDsByUserID map[uuid.UUID][]uuid.UUID) []codersdk.User {
|
||||
|
||||
+23
-39
@@ -599,28 +599,21 @@ func TestNotifyDeletedUser(t *testing.T) {
|
||||
// then
|
||||
sent := notifyEnq.Sent()
|
||||
require.Len(t, sent, 5)
|
||||
// Other notifications:
|
||||
// "User admin" account created, "owner" notified
|
||||
// "Member" account created, "owner" notified
|
||||
// "Member" account created, "user admin" notified
|
||||
// sent[0]: "User admin" account created, "owner" notified
|
||||
// sent[1]: "Member" account created, "owner" notified
|
||||
// sent[2]: "Member" account created, "user admin" notified
|
||||
|
||||
// "Member" account deleted, "owner" notified
|
||||
ownerNotifications := notifyEnq.Sent(func(n *notificationstest.FakeNotification) bool {
|
||||
return n.TemplateID == notifications.TemplateUserAccountDeleted &&
|
||||
n.UserID == firstUser.UserID &&
|
||||
slices.Contains(n.Targets, member.ID) &&
|
||||
n.Labels["deleted_account_name"] == member.Username
|
||||
})
|
||||
require.Len(t, ownerNotifications, 1)
|
||||
require.Equal(t, notifications.TemplateUserAccountDeleted, sent[3].TemplateID)
|
||||
require.Equal(t, firstUser.UserID, sent[3].UserID)
|
||||
require.Contains(t, sent[3].Targets, member.ID)
|
||||
require.Equal(t, member.Username, sent[3].Labels["deleted_account_name"])
|
||||
|
||||
// "Member" account deleted, "user admin" notified
|
||||
adminNotifications := notifyEnq.Sent(func(n *notificationstest.FakeNotification) bool {
|
||||
return n.TemplateID == notifications.TemplateUserAccountDeleted &&
|
||||
n.UserID == userAdmin.ID &&
|
||||
slices.Contains(n.Targets, member.ID) &&
|
||||
n.Labels["deleted_account_name"] == member.Username
|
||||
})
|
||||
require.Len(t, adminNotifications, 1)
|
||||
require.Equal(t, notifications.TemplateUserAccountDeleted, sent[4].TemplateID)
|
||||
require.Equal(t, userAdmin.ID, sent[4].UserID)
|
||||
require.Contains(t, sent[4].Targets, member.ID)
|
||||
require.Equal(t, member.Username, sent[4].Labels["deleted_account_name"])
|
||||
})
|
||||
}
|
||||
|
||||
@@ -967,31 +960,22 @@ func TestNotifyCreatedUser(t *testing.T) {
|
||||
require.Len(t, sent, 3)
|
||||
|
||||
// "User admin" account created, "owner" notified
|
||||
ownerNotifiedAboutUserAdmin := notifyEnq.Sent(func(n *notificationstest.FakeNotification) bool {
|
||||
return n.TemplateID == notifications.TemplateUserAccountCreated &&
|
||||
n.UserID == firstUser.UserID &&
|
||||
slices.Contains(n.Targets, userAdmin.ID) &&
|
||||
n.Labels["created_account_name"] == userAdmin.Username
|
||||
})
|
||||
require.Len(t, ownerNotifiedAboutUserAdmin, 1)
|
||||
require.Equal(t, notifications.TemplateUserAccountCreated, sent[0].TemplateID)
|
||||
require.Equal(t, firstUser.UserID, sent[0].UserID)
|
||||
require.Contains(t, sent[0].Targets, userAdmin.ID)
|
||||
require.Equal(t, userAdmin.Username, sent[0].Labels["created_account_name"])
|
||||
|
||||
// "Member" account created, "owner" notified
|
||||
ownerNotifiedAboutMember := notifyEnq.Sent(func(n *notificationstest.FakeNotification) bool {
|
||||
return n.TemplateID == notifications.TemplateUserAccountCreated &&
|
||||
n.UserID == firstUser.UserID &&
|
||||
slices.Contains(n.Targets, member.ID) &&
|
||||
n.Labels["created_account_name"] == member.Username
|
||||
})
|
||||
require.Len(t, ownerNotifiedAboutMember, 1)
|
||||
require.Equal(t, notifications.TemplateUserAccountCreated, sent[1].TemplateID)
|
||||
require.Equal(t, firstUser.UserID, sent[1].UserID)
|
||||
require.Contains(t, sent[1].Targets, member.ID)
|
||||
require.Equal(t, member.Username, sent[1].Labels["created_account_name"])
|
||||
|
||||
// "Member" account created, "user admin" notified
|
||||
userAdminNotifiedAboutMember := notifyEnq.Sent(func(n *notificationstest.FakeNotification) bool {
|
||||
return n.TemplateID == notifications.TemplateUserAccountCreated &&
|
||||
n.UserID == userAdmin.ID &&
|
||||
slices.Contains(n.Targets, member.ID) &&
|
||||
n.Labels["created_account_name"] == member.Username
|
||||
})
|
||||
require.Len(t, userAdminNotifiedAboutMember, 1)
|
||||
require.Equal(t, notifications.TemplateUserAccountCreated, sent[1].TemplateID)
|
||||
require.Equal(t, userAdmin.ID, sent[2].UserID)
|
||||
require.Contains(t, sent[2].Targets, member.ID)
|
||||
require.Equal(t, member.Username, sent[2].Labels["created_account_name"])
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
+56
-58
@@ -428,7 +428,7 @@ func (api *API) patchWorkspaceAgentAppStatus(rw http.ResponseWriter, r *http.Req
|
||||
})
|
||||
|
||||
// Notify on state change to Working/Idle for AI tasks
|
||||
api.enqueueAITaskStateNotification(ctx, app.ID, latestAppStatus, req.State, workspace, workspaceAgent)
|
||||
api.enqueueAITaskStateNotification(ctx, app.ID, latestAppStatus, req.State, workspace)
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, nil)
|
||||
}
|
||||
@@ -437,15 +437,13 @@ func (api *API) patchWorkspaceAgentAppStatus(rw http.ResponseWriter, r *http.Req
|
||||
// transitions to Working or Idle.
|
||||
// No-op if:
|
||||
// - the workspace agent app isn't configured as an AI task,
|
||||
// - the new state equals the latest persisted state,
|
||||
// - the workspace agent is not ready (still starting up).
|
||||
// - the new state equals the latest persisted state.
|
||||
func (api *API) enqueueAITaskStateNotification(
|
||||
ctx context.Context,
|
||||
appID uuid.UUID,
|
||||
latestAppStatus []database.WorkspaceAppStatus,
|
||||
newAppStatus codersdk.WorkspaceAppStatusState,
|
||||
workspace database.Workspace,
|
||||
agent database.WorkspaceAgent,
|
||||
) {
|
||||
// Select notification template based on the new state
|
||||
var notificationTemplate uuid.UUID
|
||||
@@ -463,67 +461,67 @@ func (api *API) enqueueAITaskStateNotification(
|
||||
return
|
||||
}
|
||||
|
||||
if !workspace.TaskID.Valid {
|
||||
// Workspace has no task ID, do nothing.
|
||||
return
|
||||
}
|
||||
|
||||
// Only send notifications when the agent is ready. We want to skip
|
||||
// any state transitions that occur whilst the workspace is starting
|
||||
// up as it doesn't make sense to receive them.
|
||||
if agent.LifecycleState != database.WorkspaceAgentLifecycleStateReady {
|
||||
api.Logger.Debug(ctx, "skipping AI task notification because agent is not ready",
|
||||
slog.F("agent_id", agent.ID),
|
||||
slog.F("lifecycle_state", agent.LifecycleState),
|
||||
slog.F("new_app_status", newAppStatus),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
task, err := api.Database.GetTaskByID(ctx, workspace.TaskID.UUID)
|
||||
workspaceBuild, err := api.Database.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID)
|
||||
if err != nil {
|
||||
api.Logger.Warn(ctx, "failed to get task", slog.Error(err))
|
||||
api.Logger.Warn(ctx, "failed to get workspace build", slog.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if !task.WorkspaceAppID.Valid || task.WorkspaceAppID.UUID != appID {
|
||||
// Non-task app, do nothing.
|
||||
return
|
||||
}
|
||||
// Confirm Workspace Agent App is an AI Task
|
||||
if workspaceBuild.HasAITask.Valid && workspaceBuild.HasAITask.Bool &&
|
||||
workspaceBuild.AITaskSidebarAppID.Valid && workspaceBuild.AITaskSidebarAppID.UUID == appID {
|
||||
// Skip if the latest persisted state equals the new state (no new transition)
|
||||
if len(latestAppStatus) > 0 && latestAppStatus[0].State == database.WorkspaceAppStatusState(newAppStatus) {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if the latest persisted state equals the new state (no new transition)
|
||||
if len(latestAppStatus) > 0 && latestAppStatus[0].State == database.WorkspaceAppStatusState(newAppStatus) {
|
||||
return
|
||||
}
|
||||
// Skip the initial "Working" notification when task first starts.
|
||||
// This is obvious to the user since they just created the task.
|
||||
// We still notify on first "Idle" status and all subsequent transitions.
|
||||
if len(latestAppStatus) == 0 && newAppStatus == codersdk.WorkspaceAppStatusStateWorking {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip the initial "Working" notification when task first starts.
|
||||
// This is obvious to the user since they just created the task.
|
||||
// We still notify on first "Idle" status and all subsequent transitions.
|
||||
if len(latestAppStatus) == 0 && newAppStatus == codersdk.WorkspaceAppStatusStateWorking {
|
||||
return
|
||||
}
|
||||
// Use the task prompt as the "task" label, fallback to workspace name
|
||||
parameters, err := api.Database.GetWorkspaceBuildParameters(ctx, workspaceBuild.ID)
|
||||
if err != nil {
|
||||
api.Logger.Warn(ctx, "failed to get workspace build parameters", slog.Error(err))
|
||||
return
|
||||
}
|
||||
taskName := workspace.Name
|
||||
for _, param := range parameters {
|
||||
if param.Name == codersdk.AITaskPromptParameterName {
|
||||
taskName = param.Value
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := api.NotificationsEnqueuer.EnqueueWithData(
|
||||
// nolint:gocritic // Need notifier actor to enqueue notifications
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notificationTemplate,
|
||||
map[string]string{
|
||||
"task": task.Name,
|
||||
"workspace": workspace.Name,
|
||||
},
|
||||
map[string]any{
|
||||
// Use a 1-minute bucketed timestamp to bypass per-day dedupe,
|
||||
// allowing identical content to resend within the same day
|
||||
// (but not more than once every 10s).
|
||||
"dedupe_bypass_ts": api.Clock.Now().UTC().Truncate(time.Minute),
|
||||
},
|
||||
"api-workspace-agent-app-status",
|
||||
// Associate this notification with related entities
|
||||
workspace.ID, workspace.OwnerID, workspace.OrganizationID, appID,
|
||||
); err != nil {
|
||||
api.Logger.Warn(ctx, "failed to notify of task state", slog.Error(err))
|
||||
return
|
||||
// As task prompt may be particularly long, truncate it to 160 characters for notifications.
|
||||
if len(taskName) > 160 {
|
||||
taskName = strutil.Truncate(taskName, 160, strutil.TruncateWithEllipsis, strutil.TruncateWithFullWords)
|
||||
}
|
||||
|
||||
if _, err := api.NotificationsEnqueuer.EnqueueWithData(
|
||||
// nolint:gocritic // Need notifier actor to enqueue notifications
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notificationTemplate,
|
||||
map[string]string{
|
||||
"task": taskName,
|
||||
"workspace": workspace.Name,
|
||||
},
|
||||
map[string]any{
|
||||
// Use a 1-minute bucketed timestamp to bypass per-day dedupe,
|
||||
// allowing identical content to resend within the same day
|
||||
// (but not more than once every 10s).
|
||||
"dedupe_bypass_ts": api.Clock.Now().UTC().Truncate(time.Minute),
|
||||
},
|
||||
"api-workspace-agent-app-status",
|
||||
// Associate this notification with related entities
|
||||
workspace.ID, workspace.OwnerID, workspace.OrganizationID, appID,
|
||||
); err != nil {
|
||||
api.Logger.Warn(ctx, "failed to notify of task state", slog.Error(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1190,6 +1190,11 @@ func (api *API) convertWorkspaceBuild(
|
||||
if build.HasAITask.Valid {
|
||||
hasAITask = &build.HasAITask.Bool
|
||||
}
|
||||
var taskAppID *uuid.UUID
|
||||
if build.AITaskSidebarAppID.Valid {
|
||||
taskAppID = &build.AITaskSidebarAppID.UUID
|
||||
}
|
||||
|
||||
var hasExternalAgent *bool
|
||||
if build.HasExternalAgent.Valid {
|
||||
hasExternalAgent = &build.HasExternalAgent.Bool
|
||||
@@ -1222,6 +1227,7 @@ func (api *API) convertWorkspaceBuild(
|
||||
MatchedProvisioners: &matchedProvisioners,
|
||||
TemplateVersionPresetID: presetID,
|
||||
HasAITask: hasAITask,
|
||||
AITaskSidebarAppID: taskAppID,
|
||||
HasExternalAgent: hasExternalAgent,
|
||||
}, nil
|
||||
}
|
||||
|
||||
+74
-47
@@ -4700,16 +4700,11 @@ func TestWorkspaceFilterHasAITask(t *testing.T) {
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Helper function to create workspace with optional task.
|
||||
createWorkspace := func(jobCompleted, createTask bool, prompt string) uuid.UUID {
|
||||
// TODO(mafredri): The bellow comment is based on deprecated logic and
|
||||
// kept only present to test that the old observable behavior works as
|
||||
// intended.
|
||||
//
|
||||
// Helper function to create workspace with AI task configuration
|
||||
createWorkspaceWithAIConfig := func(hasAITask sql.NullBool, jobCompleted bool, aiTaskPrompt *string) database.WorkspaceTable {
|
||||
// When a provisioner job uses these tags, no provisioner will match it.
|
||||
// We do this so jobs will always be stuck in "pending", allowing us to
|
||||
// exercise the intermediary state when has_ai_task is nil and we
|
||||
// compensate by looking at pending provisioning jobs.
|
||||
// We do this so jobs will always be stuck in "pending", allowing us to exercise the intermediary state when
|
||||
// has_ai_task is nil and we compensate by looking at pending provisioning jobs.
|
||||
// See GetWorkspaces clauses.
|
||||
unpickableTags := database.StringMap{"custom": "true"}
|
||||
|
||||
@@ -4728,70 +4723,102 @@ func TestWorkspaceFilterHasAITask(t *testing.T) {
|
||||
jobConfig.CompletedAt = sql.NullTime{Time: time.Now(), Valid: true}
|
||||
}
|
||||
job := dbgen.ProvisionerJob(t, db, pubsub, jobConfig)
|
||||
|
||||
res := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{JobID: job.ID})
|
||||
agnt := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ResourceID: res.ID})
|
||||
taskApp := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{AgentID: agnt.ID})
|
||||
build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: ws.ID,
|
||||
TemplateVersionID: version.ID,
|
||||
InitiatorID: user.UserID,
|
||||
JobID: job.ID,
|
||||
BuildNumber: 1,
|
||||
})
|
||||
|
||||
if createTask {
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
WorkspaceID: uuid.NullUUID{UUID: ws.ID, Valid: true},
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
TemplateVersionID: version.ID,
|
||||
Prompt: prompt,
|
||||
})
|
||||
dbgen.TaskWorkspaceApp(t, db, database.TaskWorkspaceApp{
|
||||
TaskID: task.ID,
|
||||
WorkspaceBuildNumber: build.BuildNumber,
|
||||
WorkspaceAgentID: uuid.NullUUID{UUID: agnt.ID, Valid: true},
|
||||
WorkspaceAppID: uuid.NullUUID{UUID: taskApp.ID, Valid: true},
|
||||
})
|
||||
var sidebarAppID uuid.UUID
|
||||
if hasAITask.Bool {
|
||||
sidebarApp := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{AgentID: agnt.ID})
|
||||
sidebarAppID = sidebarApp.ID
|
||||
}
|
||||
|
||||
return ws.ID
|
||||
build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: ws.ID,
|
||||
TemplateVersionID: version.ID,
|
||||
InitiatorID: user.UserID,
|
||||
JobID: job.ID,
|
||||
BuildNumber: 1,
|
||||
HasAITask: hasAITask,
|
||||
AITaskSidebarAppID: uuid.NullUUID{UUID: sidebarAppID, Valid: sidebarAppID != uuid.Nil},
|
||||
})
|
||||
|
||||
if aiTaskPrompt != nil {
|
||||
err := db.InsertWorkspaceBuildParameters(dbauthz.AsSystemRestricted(ctx), database.InsertWorkspaceBuildParametersParams{
|
||||
WorkspaceBuildID: build.ID,
|
||||
Name: []string{provider.TaskPromptParameterName},
|
||||
Value: []string{*aiTaskPrompt},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return ws
|
||||
}
|
||||
|
||||
// Create workspaces with tasks.
|
||||
wsWithTask1 := createWorkspace(true, true, "Build me a web app")
|
||||
wsWithTask2 := createWorkspace(false, true, "Another task")
|
||||
// Create test workspaces with different AI task configurations
|
||||
wsWithAITask := createWorkspaceWithAIConfig(sql.NullBool{Bool: true, Valid: true}, true, nil)
|
||||
wsWithoutAITask := createWorkspaceWithAIConfig(sql.NullBool{Bool: false, Valid: true}, false, nil)
|
||||
|
||||
// Create workspaces without tasks
|
||||
wsWithoutTask1 := createWorkspace(true, false, "")
|
||||
wsWithoutTask2 := createWorkspace(false, false, "")
|
||||
aiTaskPrompt := "Build me a web app"
|
||||
wsWithAITaskParam := createWorkspaceWithAIConfig(sql.NullBool{Valid: false}, false, &aiTaskPrompt)
|
||||
|
||||
anotherTaskPrompt := "Another task"
|
||||
wsCompletedWithAITaskParam := createWorkspaceWithAIConfig(sql.NullBool{Valid: false}, true, &anotherTaskPrompt)
|
||||
|
||||
emptyPrompt := ""
|
||||
wsWithEmptyAITaskParam := createWorkspaceWithAIConfig(sql.NullBool{Valid: false}, false, &emptyPrompt)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
// Debug: Check all workspaces without filter first
|
||||
allRes, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{})
|
||||
require.NoError(t, err)
|
||||
t.Logf("Total workspaces created: %d", len(allRes.Workspaces))
|
||||
for i, ws := range allRes.Workspaces {
|
||||
t.Logf("All Workspace %d: ID=%s, Name=%s, Build ID=%s, Job ID=%s", i, ws.ID, ws.Name, ws.LatestBuild.ID, ws.LatestBuild.Job.ID)
|
||||
}
|
||||
|
||||
// Test filtering for workspaces with AI tasks
|
||||
// Should include: wsWithTask1 and wsWithTask2
|
||||
// Should include: wsWithAITask (has_ai_task=true) and wsWithAITaskParam (null + incomplete + param)
|
||||
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
FilterQuery: "has-ai-task:true",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t.Logf("Expected 2 workspaces for has-ai-task:true, got %d", len(res.Workspaces))
|
||||
t.Logf("Expected workspaces: %s, %s", wsWithAITask.ID, wsWithAITaskParam.ID)
|
||||
for i, ws := range res.Workspaces {
|
||||
t.Logf("AI Task True Workspace %d: ID=%s, Name=%s", i, ws.ID, ws.Name)
|
||||
}
|
||||
require.Len(t, res.Workspaces, 2)
|
||||
workspaceIDs := []uuid.UUID{res.Workspaces[0].ID, res.Workspaces[1].ID}
|
||||
require.Contains(t, workspaceIDs, wsWithTask1)
|
||||
require.Contains(t, workspaceIDs, wsWithTask2)
|
||||
require.Contains(t, workspaceIDs, wsWithAITask.ID)
|
||||
require.Contains(t, workspaceIDs, wsWithAITaskParam.ID)
|
||||
|
||||
// Test filtering for workspaces without AI tasks
|
||||
// Should include: wsWithoutTask1, wsWithoutTask2, wsWithoutTask3
|
||||
// Should include: wsWithoutAITask, wsCompletedWithAITaskParam, wsWithEmptyAITaskParam
|
||||
res, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
FilterQuery: "has-ai-task:false",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Workspaces, 2)
|
||||
workspaceIDs = []uuid.UUID{res.Workspaces[0].ID, res.Workspaces[1].ID}
|
||||
require.Contains(t, workspaceIDs, wsWithoutTask1)
|
||||
require.Contains(t, workspaceIDs, wsWithoutTask2)
|
||||
|
||||
// Debug: print what we got
|
||||
t.Logf("Expected 3 workspaces for has-ai-task:false, got %d", len(res.Workspaces))
|
||||
for i, ws := range res.Workspaces {
|
||||
t.Logf("Workspace %d: ID=%s, Name=%s", i, ws.ID, ws.Name)
|
||||
}
|
||||
t.Logf("Expected IDs: %s, %s, %s", wsWithoutAITask.ID, wsCompletedWithAITaskParam.ID, wsWithEmptyAITaskParam.ID)
|
||||
|
||||
require.Len(t, res.Workspaces, 3)
|
||||
workspaceIDs = []uuid.UUID{res.Workspaces[0].ID, res.Workspaces[1].ID, res.Workspaces[2].ID}
|
||||
require.Contains(t, workspaceIDs, wsWithoutAITask.ID)
|
||||
require.Contains(t, workspaceIDs, wsCompletedWithAITaskParam.ID)
|
||||
require.Contains(t, workspaceIDs, wsWithEmptyAITaskParam.ID)
|
||||
|
||||
// Test no filter returns all
|
||||
res, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Workspaces, 4)
|
||||
require.Len(t, res.Workspaces, 5)
|
||||
}
|
||||
|
||||
func TestWorkspaceAppUpsertRestart(t *testing.T) {
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
type AIBridgeInterception struct {
|
||||
ID uuid.UUID `json:"id" format:"uuid"`
|
||||
APIKeyID *string `json:"api_key_id"`
|
||||
Initiator MinimalUser `json:"initiator"`
|
||||
Provider string `json:"provider"`
|
||||
Model string `json:"model"`
|
||||
|
||||
+66
-34
@@ -17,16 +17,46 @@ import (
|
||||
// AITaskPromptParameterName is the name of the parameter used to pass prompts
|
||||
// to AI tasks.
|
||||
//
|
||||
// Deprecated: This constant is deprecated and maintained only for backwards
|
||||
// compatibility with older templates. Task prompts are now stored directly
|
||||
// in the tasks.prompt database column. New code should access prompts via
|
||||
// the Task.InitialPrompt field returned from task endpoints.
|
||||
//
|
||||
// This constant will be removed in a future major version. Templates should
|
||||
// not rely on this parameter name, as the backend will continue to create it
|
||||
// automatically for compatibility but reads from tasks.prompt.
|
||||
// Experimental: This value is experimental and may change in the future.
|
||||
const AITaskPromptParameterName = provider.TaskPromptParameterName
|
||||
|
||||
// AITasksPromptsResponse represents the response from the AITaskPrompts method.
|
||||
//
|
||||
// Experimental: This method is experimental and may change in the future.
|
||||
type AITasksPromptsResponse struct {
|
||||
// Prompts is a map of workspace build IDs to prompts.
|
||||
Prompts map[string]string `json:"prompts"`
|
||||
}
|
||||
|
||||
// AITaskPrompts returns prompts for multiple workspace builds by their IDs.
|
||||
//
|
||||
// Experimental: This method is experimental and may change in the future.
|
||||
func (c *ExperimentalClient) AITaskPrompts(ctx context.Context, buildIDs []uuid.UUID) (AITasksPromptsResponse, error) {
|
||||
if len(buildIDs) == 0 {
|
||||
return AITasksPromptsResponse{
|
||||
Prompts: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Convert UUIDs to strings and join them
|
||||
buildIDStrings := make([]string, len(buildIDs))
|
||||
for i, id := range buildIDs {
|
||||
buildIDStrings[i] = id.String()
|
||||
}
|
||||
buildIDsParam := strings.Join(buildIDStrings, ",")
|
||||
|
||||
res, err := c.Request(ctx, http.MethodGet, "/api/experimental/aitasks/prompts", nil, WithQueryParam("build_ids", buildIDsParam))
|
||||
if err != nil {
|
||||
return AITasksPromptsResponse{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return AITasksPromptsResponse{}, ReadBodyAsError(res)
|
||||
}
|
||||
var prompts AITasksPromptsResponse
|
||||
return prompts, json.NewDecoder(res.Body).Decode(&prompts)
|
||||
}
|
||||
|
||||
// CreateTaskRequest represents the request to create a new task.
|
||||
//
|
||||
// Experimental: This type is experimental and may change in the future.
|
||||
@@ -231,7 +261,6 @@ func (c *ExperimentalClient) Tasks(ctx context.Context, filter *TasksFilter) ([]
|
||||
}
|
||||
|
||||
// TaskByID fetches a single experimental task by its ID.
|
||||
// Only tasks owned by codersdk.Me are supported.
|
||||
//
|
||||
// Experimental: This method is experimental and may change in the future.
|
||||
func (c *ExperimentalClient) TaskByID(ctx context.Context, id uuid.UUID) (Task, error) {
|
||||
@@ -252,30 +281,6 @@ func (c *ExperimentalClient) TaskByID(ctx context.Context, id uuid.UUID) (Task,
|
||||
return task, nil
|
||||
}
|
||||
|
||||
// TaskByOwnerAndName fetches a single experimental task by its owner and name.
|
||||
//
|
||||
// Experimental: This method is experimental and may change in the future.
|
||||
func (c *ExperimentalClient) TaskByOwnerAndName(ctx context.Context, owner, ident string) (Task, error) {
|
||||
if owner == "" {
|
||||
owner = Me
|
||||
}
|
||||
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/experimental/tasks/%s/%s", owner, ident), nil)
|
||||
if err != nil {
|
||||
return Task{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return Task{}, ReadBodyAsError(res)
|
||||
}
|
||||
|
||||
var task Task
|
||||
if err := json.NewDecoder(res.Body).Decode(&task); err != nil {
|
||||
return Task{}, err
|
||||
}
|
||||
|
||||
return task, nil
|
||||
}
|
||||
|
||||
func splitTaskIdentifier(identifier string) (owner string, taskName string, err error) {
|
||||
parts := strings.Split(identifier, "/")
|
||||
|
||||
@@ -312,7 +317,34 @@ func (c *ExperimentalClient) TaskByIdentifier(ctx context.Context, identifier st
|
||||
return Task{}, err
|
||||
}
|
||||
|
||||
return c.TaskByOwnerAndName(ctx, owner, taskName)
|
||||
tasks, err := c.Tasks(ctx, &TasksFilter{
|
||||
Owner: owner,
|
||||
})
|
||||
if err != nil {
|
||||
return Task{}, xerrors.Errorf("list tasks for owner %q: %w", owner, err)
|
||||
}
|
||||
|
||||
if taskID, err := uuid.Parse(taskName); err == nil {
|
||||
// Find task by ID.
|
||||
for _, task := range tasks {
|
||||
if task.ID == taskID {
|
||||
return task, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Find task by name.
|
||||
for _, task := range tasks {
|
||||
if task.Name == taskName {
|
||||
return task, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mimic resource not found from API.
|
||||
var notFoundErr error = &Error{
|
||||
Response: Response{Message: "Resource not found or you do not have access to this resource"},
|
||||
}
|
||||
return Task{}, xerrors.Errorf("task %q not found for owner %q: %w", taskName, owner, notFoundErr)
|
||||
}
|
||||
|
||||
// DeleteTask deletes a task by its ID.
|
||||
|
||||
+9
-12
@@ -251,17 +251,16 @@ func (c *Client) RequestWithoutSessionToken(ctx context.Context, method, path st
|
||||
}
|
||||
|
||||
// Copy the request body so we can log it.
|
||||
var reqLogFields []any
|
||||
var reqBody []byte
|
||||
c.mu.RLock()
|
||||
logBodies := c.logBodies
|
||||
c.mu.RUnlock()
|
||||
if r != nil && logBodies {
|
||||
reqBody, err := io.ReadAll(r)
|
||||
reqBody, err = io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read request body: %w", err)
|
||||
}
|
||||
r = bytes.NewReader(reqBody)
|
||||
reqLogFields = append(reqLogFields, slog.F("body", string(reqBody)))
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, method, serverURL.String(), r)
|
||||
@@ -292,7 +291,7 @@ func (c *Client) RequestWithoutSessionToken(ctx context.Context, method, path st
|
||||
slog.F("url", req.URL.String()),
|
||||
)
|
||||
tracing.RunWithoutSpan(ctx, func(ctx context.Context) {
|
||||
c.Logger().Debug(ctx, "sdk request", reqLogFields...)
|
||||
c.Logger().Debug(ctx, "sdk request", slog.F("body", string(reqBody)))
|
||||
})
|
||||
|
||||
resp, err := c.HTTPClient.Do(req)
|
||||
@@ -325,11 +324,11 @@ func (c *Client) RequestWithoutSessionToken(ctx context.Context, method, path st
|
||||
span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
|
||||
|
||||
// Copy the response body so we can log it if it's a loggable mime type.
|
||||
var respLogFields []any
|
||||
var respBody []byte
|
||||
if resp.Body != nil && logBodies {
|
||||
mimeType := parseMimeType(resp.Header.Get("Content-Type"))
|
||||
if _, ok := loggableMimeTypes[mimeType]; ok {
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
respBody, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("copy response body for logs: %w", err)
|
||||
}
|
||||
@@ -338,18 +337,16 @@ func (c *Client) RequestWithoutSessionToken(ctx context.Context, method, path st
|
||||
return nil, xerrors.Errorf("close response body: %w", err)
|
||||
}
|
||||
resp.Body = io.NopCloser(bytes.NewReader(respBody))
|
||||
respLogFields = append(respLogFields, slog.F("body", string(respBody)))
|
||||
}
|
||||
}
|
||||
|
||||
// See above for why this is not logged to the span.
|
||||
tracing.RunWithoutSpan(ctx, func(ctx context.Context) {
|
||||
c.Logger().Debug(ctx, "sdk response",
|
||||
append(respLogFields,
|
||||
slog.F("status", resp.StatusCode),
|
||||
slog.F("trace_id", resp.Header.Get("X-Trace-Id")),
|
||||
slog.F("span_id", resp.Header.Get("X-Span-Id")),
|
||||
)...,
|
||||
slog.F("status", resp.StatusCode),
|
||||
slog.F("body", string(respBody)),
|
||||
slog.F("trace_id", resp.Header.Get("X-Trace-Id")),
|
||||
slog.F("span_id", resp.Header.Get("X-Span-Id")),
|
||||
)
|
||||
})
|
||||
|
||||
|
||||
@@ -162,45 +162,6 @@ func Test_Client(t *testing.T) {
|
||||
require.Contains(t, logStr, strings.ReplaceAll(resBody, `"`, `\"`))
|
||||
}
|
||||
|
||||
func Test_Client_LogBodiesFalse(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const method = http.MethodPost
|
||||
const path = "/ok"
|
||||
const reqBody = `{"msg": "request body"}`
|
||||
const resBody = `{"status": "ok"}`
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", jsonCT)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = io.WriteString(w, resBody)
|
||||
}))
|
||||
|
||||
u, err := url.Parse(s.URL)
|
||||
require.NoError(t, err)
|
||||
client := New(u)
|
||||
|
||||
logBuf := bytes.NewBuffer(nil)
|
||||
client.SetLogger(slog.Make(sloghuman.Sink(logBuf)).Leveled(slog.LevelDebug))
|
||||
client.SetLogBodies(false)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
resp, err := client.Request(ctx, method, path, []byte(reqBody))
|
||||
require.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, resBody, string(body))
|
||||
|
||||
logStr := logBuf.String()
|
||||
require.Contains(t, logStr, "sdk request")
|
||||
require.Contains(t, logStr, "sdk response")
|
||||
require.NotContains(t, logStr, "body")
|
||||
}
|
||||
|
||||
func Test_readBodyAsError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
+4
-15
@@ -3339,16 +3339,6 @@ Write out the current server config as YAML to stdout.`,
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "bedrock_small_fast_model",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Inject Coder MCP tools",
|
||||
Description: "Whether to inject Coder's MCP tools into intercepted AI Bridge requests (requires the \"oauth2\" and \"mcp-server-http\" experiments to be enabled).",
|
||||
Flag: "aibridge-inject-coder-mcp-tools",
|
||||
Env: "CODER_AIBRIDGE_INJECT_CODER_MCP_TOOLS",
|
||||
Value: &c.AI.BridgeConfig.InjectCoderMCPTools,
|
||||
Default: "false",
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "inject_coder_mcp_tools",
|
||||
},
|
||||
{
|
||||
Name: "Enable Authorization Recordings",
|
||||
Description: "All api requests will have a header including all authorization calls made during the request. " +
|
||||
@@ -3368,11 +3358,10 @@ Write out the current server config as YAML to stdout.`,
|
||||
}
|
||||
|
||||
type AIBridgeConfig struct {
|
||||
Enabled serpent.Bool `json:"enabled" typescript:",notnull"`
|
||||
OpenAI AIBridgeOpenAIConfig `json:"openai" typescript:",notnull"`
|
||||
Anthropic AIBridgeAnthropicConfig `json:"anthropic" typescript:",notnull"`
|
||||
Bedrock AIBridgeBedrockConfig `json:"bedrock" typescript:",notnull"`
|
||||
InjectCoderMCPTools serpent.Bool `json:"inject_coder_mcp_tools" typescript:",notnull"`
|
||||
Enabled serpent.Bool `json:"enabled" typescript:",notnull"`
|
||||
OpenAI AIBridgeOpenAIConfig `json:"openai" typescript:",notnull"`
|
||||
Anthropic AIBridgeAnthropicConfig `json:"anthropic" typescript:",notnull"`
|
||||
Bedrock AIBridgeBedrockConfig `json:"bedrock" typescript:",notnull"`
|
||||
}
|
||||
|
||||
type AIBridgeOpenAIConfig struct {
|
||||
|
||||
@@ -262,7 +262,6 @@ type OAuth2AuthorizationServerMetadata struct {
|
||||
AuthorizationEndpoint string `json:"authorization_endpoint"`
|
||||
TokenEndpoint string `json:"token_endpoint"`
|
||||
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
|
||||
RevocationEndpoint string `json:"revocation_endpoint,omitempty"`
|
||||
ResponseTypesSupported []string `json:"response_types_supported"`
|
||||
GrantTypesSupported []string `json:"grant_types_supported"`
|
||||
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
|
||||
|
||||
+11
-14
@@ -317,14 +317,13 @@ type GetWorkspaceArgs struct {
|
||||
var GetWorkspace = Tool[GetWorkspaceArgs, codersdk.Workspace]{
|
||||
Tool: aisdk.Tool{
|
||||
Name: ToolNameGetWorkspace,
|
||||
Description: `Get a workspace by name or ID.
|
||||
Description: `Get a workspace by ID.
|
||||
|
||||
This returns more data than list_workspaces to reduce token usage.`,
|
||||
Schema: aisdk.Schema{
|
||||
Properties: map[string]any{
|
||||
"workspace_id": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceDescription,
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
Required: []string{"workspace_id"},
|
||||
@@ -333,7 +332,7 @@ This returns more data than list_workspaces to reduce token usage.`,
|
||||
Handler: func(ctx context.Context, deps Deps, args GetWorkspaceArgs) (codersdk.Workspace, error) {
|
||||
wsID, err := uuid.Parse(args.WorkspaceID)
|
||||
if err != nil {
|
||||
return namedWorkspace(ctx, deps.coderClient, NormalizeWorkspaceInput(args.WorkspaceID))
|
||||
return codersdk.Workspace{}, xerrors.New("workspace_id must be a valid UUID")
|
||||
}
|
||||
return deps.coderClient.Workspace(ctx, wsID)
|
||||
},
|
||||
@@ -1433,7 +1432,7 @@ var WorkspaceLS = Tool[WorkspaceLSArgs, WorkspaceLSResponse]{
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"path": map[string]any{
|
||||
"type": "string",
|
||||
@@ -1490,7 +1489,7 @@ var WorkspaceReadFile = Tool[WorkspaceReadFileArgs, WorkspaceReadFileResponse]{
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"path": map[string]any{
|
||||
"type": "string",
|
||||
@@ -1567,7 +1566,7 @@ content you are trying to write, then re-encode it properly.
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"path": map[string]any{
|
||||
"type": "string",
|
||||
@@ -1615,7 +1614,7 @@ var WorkspaceEditFile = Tool[WorkspaceEditFileArgs, codersdk.Response]{
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"path": map[string]any{
|
||||
"type": "string",
|
||||
@@ -1682,7 +1681,7 @@ var WorkspaceEditFiles = Tool[WorkspaceEditFilesArgs, codersdk.Response]{
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"files": map[string]any{
|
||||
"type": "array",
|
||||
@@ -1756,7 +1755,7 @@ var WorkspacePortForward = Tool[WorkspacePortForwardArgs, WorkspacePortForwardRe
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
"port": map[string]any{
|
||||
"type": "number",
|
||||
@@ -1813,7 +1812,7 @@ var WorkspaceListApps = Tool[WorkspaceListAppsArgs, WorkspaceListAppsResponse]{
|
||||
Properties: map[string]any{
|
||||
"workspace": map[string]any{
|
||||
"type": "string",
|
||||
"description": workspaceAgentDescription,
|
||||
"description": workspaceDescription,
|
||||
},
|
||||
},
|
||||
Required: []string{"workspace"},
|
||||
@@ -2200,9 +2199,7 @@ func newAgentConn(ctx context.Context, client *codersdk.Client, workspace string
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
const workspaceDescription = "The workspace ID or name in the format [owner/]workspace. If an owner is not specified, the authenticated user is used."
|
||||
|
||||
const workspaceAgentDescription = "The workspace name in the format [owner/]workspace[.agent]. If an owner is not specified, the authenticated user is used."
|
||||
const workspaceDescription = "The workspace name in the format [owner/]workspace[.agent]. If an owner is not specified, the authenticated user is used."
|
||||
|
||||
func taskIDDescription(action string) string {
|
||||
return fmt.Sprintf("ID or workspace identifier in the format [owner/]workspace[.agent] for the task to %s. If an owner is not specified, the authenticated user is used.", action)
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
@@ -107,9 +106,8 @@ func TestTools(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("ReportTask", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
tb, err := toolsdk.NewDeps(memberClient, toolsdk.WithTaskReporter(func(args toolsdk.ReportTaskArgs) error {
|
||||
return agentClient.PatchAppStatus(ctx, agentsdk.PatchAppStatus{
|
||||
return agentClient.PatchAppStatus(setupCtx, agentsdk.PatchAppStatus{
|
||||
AppSlug: "some-agent-app",
|
||||
Message: args.Summary,
|
||||
URI: args.Link,
|
||||
@@ -128,32 +126,12 @@ func TestTools(t *testing.T) {
|
||||
t.Run("GetWorkspace", func(t *testing.T) {
|
||||
tb, err := toolsdk.NewDeps(memberClient)
|
||||
require.NoError(t, err)
|
||||
result, err := testTool(t, toolsdk.GetWorkspace, tb, toolsdk.GetWorkspaceArgs{
|
||||
WorkspaceID: r.Workspace.ID.String(),
|
||||
})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
workspace string
|
||||
}{
|
||||
{
|
||||
name: "ByID",
|
||||
workspace: r.Workspace.ID.String(),
|
||||
},
|
||||
{
|
||||
name: "ByName",
|
||||
workspace: r.Workspace.Name,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result, err := testTool(t, toolsdk.GetWorkspace, tb, toolsdk.GetWorkspaceArgs{
|
||||
WorkspaceID: tt.workspace,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r.Workspace.ID, result.ID, "expected the workspace ID to match")
|
||||
})
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r.Workspace.ID, result.ID, "expected the workspace ID to match")
|
||||
})
|
||||
|
||||
t.Run("ListTemplates", func(t *testing.T) {
|
||||
@@ -1395,17 +1373,7 @@ func TestTools(t *testing.T) {
|
||||
task := ws.Task
|
||||
|
||||
_ = agenttest.New(t, client.URL, ws.AgentToken)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, ws.Workspace.ID).
|
||||
WaitFor(coderdtest.AgentsReady)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Ensure the app is healthy (required to send task input).
|
||||
err = store.UpdateWorkspaceAppHealthByID(dbauthz.AsSystemRestricted(ctx), database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: task.WorkspaceAppID.UUID,
|
||||
Health: database.WorkspaceAppHealthHealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, ws.Workspace.ID).Wait()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1466,6 +1434,8 @@ func TestTools(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tb, err := toolsdk.NewDeps(memberClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1535,17 +1505,7 @@ func TestTools(t *testing.T) {
|
||||
task := ws.Task
|
||||
|
||||
_ = agenttest.New(t, client.URL, ws.AgentToken)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, ws.Workspace.ID).
|
||||
WaitFor(coderdtest.AgentsReady)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Ensure the app is healthy (required to read task logs).
|
||||
err = store.UpdateWorkspaceAppHealthByID(dbauthz.AsSystemRestricted(ctx), database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: task.WorkspaceAppID.UUID,
|
||||
Health: database.WorkspaceAppHealthHealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, ws.Workspace.ID).Wait()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1597,6 +1557,8 @@ func TestTools(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tb, err := toolsdk.NewDeps(memberClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -88,9 +88,10 @@ type WorkspaceBuild struct {
|
||||
DailyCost int32 `json:"daily_cost"`
|
||||
MatchedProvisioners *MatchedProvisioners `json:"matched_provisioners,omitempty"`
|
||||
TemplateVersionPresetID *uuid.UUID `json:"template_version_preset_id" format:"uuid"`
|
||||
// Deprecated: This field has been deprecated in favor of Task WorkspaceID.
|
||||
HasAITask *bool `json:"has_ai_task,omitempty"`
|
||||
HasExternalAgent *bool `json:"has_external_agent,omitempty"`
|
||||
HasAITask *bool `json:"has_ai_task,omitempty"`
|
||||
// Deprecated: This field has been replaced with `Task.WorkspaceAppID`
|
||||
AITaskSidebarAppID *uuid.UUID `json:"ai_task_sidebar_app_id,omitempty" format:"uuid"`
|
||||
HasExternalAgent *bool `json:"has_external_agent,omitempty"`
|
||||
}
|
||||
|
||||
// WorkspaceResource describes resources used to create a workspace, for instance:
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
# Reference Architecture: up to 10,000 users
|
||||
|
||||
> [!CAUTION]
|
||||
> This page is a work in progress.
|
||||
>
|
||||
> We are actively testing different load profiles for this user target and will be updating
|
||||
> recommendations. Use these recommendations as a starting point, but monitor your cluster resource
|
||||
> utilization and adjust.
|
||||
|
||||
The 10,000 users architecture targets large-scale enterprises with development
|
||||
teams in multiple geographic regions.
|
||||
|
||||
**Geographic Distribution**: For these tests we deploy on 3 cloud-managed Kubernetes clusters in
|
||||
the following regions:
|
||||
|
||||
1. USA - Primary - Coderd collocated with the PostgreSQL database deployment.
|
||||
2. Europe - Workspace Proxies
|
||||
3. Asia - Workspace Proxies
|
||||
|
||||
**High Availability**: Typically, such scale requires a fully-managed HA
|
||||
PostgreSQL service, and all Coder observability features enabled for operational
|
||||
purposes.
|
||||
|
||||
**Observability**: Deploy monitoring solutions to gather Prometheus metrics and
|
||||
visualize them with Grafana to gain detailed insights into infrastructure and
|
||||
application behavior. This allows operators to respond quickly to incidents and
|
||||
continuously improve the reliability and performance of the platform.
|
||||
|
||||
## Testing Methodology
|
||||
|
||||
### Workspace Network Traffic
|
||||
|
||||
6000 concurrent workspaces (2000 per region), each sending 10 kB/s application traffic.
|
||||
|
||||
Test procedure:
|
||||
|
||||
1. Create workspaces. This happens simultaneously in each region with 200 provisioners (and thus 600 concurrent builds).
|
||||
2. Wait 5 minutes to establish baselines for metrics.
|
||||
3. Generate 10 kB/s traffic to each workspace (originating within the same region & cluster).
|
||||
|
||||
After, we examine the Coderd, Workspace Proxy, and Database metrics to look for issues.
|
||||
|
||||
### Dynamic Parameters
|
||||
|
||||
1000 connections simulating changing parameters while configuring a new workspace.
|
||||
|
||||
Test procedure:
|
||||
|
||||
1. Create a template with complex parameter logic and multiple template versions.
|
||||
1. Partition the connections among the template versions (forces Coder to process multiple template files)
|
||||
1. Simultaneously connect to the dynamic-parameters API websocket endpoint for the template version
|
||||
1. Wait for the initial parameter update.
|
||||
1. Send a new parameter value that has cascading effects among other parameters.
|
||||
1. Wait for the next update.
|
||||
|
||||
After, we examine the latency in the initial connection and update, as well as Coderd and Database metrics to look for
|
||||
issues.
|
||||
|
||||
### API Request Traffic
|
||||
|
||||
To be determined.
|
||||
|
||||
## Hardware recommendations
|
||||
|
||||
### Coderd
|
||||
|
||||
These are deployed in the Primary region only.
|
||||
|
||||
| vCPU Limit | Memory Limit | Replicas | GCP Node Pool Machine Type |
|
||||
|----------------|--------------|----------|----------------------------|
|
||||
| 4 vCPU (4000m) | 12 GiB | 10 | `c2d-standard-16` |
|
||||
|
||||
### Provisioners
|
||||
|
||||
These are deployed in each of the 3 regions.
|
||||
|
||||
| vCPU Limit | Memory Limit | Replicas | GCP Node Pool Machine Type |
|
||||
|-----------------|--------------|----------|----------------------------|
|
||||
| 0.1 vCPU (100m) | 1 GiB | 200 | `c2d-standard-16` |
|
||||
|
||||
**Footnotes**:
|
||||
|
||||
- Each provisioner handles a single concurrent build, so this configuration implies 200 concurrent
|
||||
workspace builds per region.
|
||||
- Provisioners are run as a separate Kubernetes Deployment from Coderd, although they may
|
||||
share the same node pool.
|
||||
- Separate provisioners into different namespaces in favor of zero-trust or
|
||||
multi-cloud deployments.
|
||||
|
||||
### Workspace Proxies
|
||||
|
||||
These are deployed in the non-Primary regions only.
|
||||
|
||||
| vCPU Limit | Memory Limit | Replicas | GCP Node Pool Machine Type |
|
||||
|----------------|--------------|----------|----------------------------|
|
||||
| 4 vCPU (4000m) | 12 GiB | 10 | `c2d-standard-16` |
|
||||
|
||||
**Footnotes**:
|
||||
|
||||
- Our testing implies this is somewhat overspecced for the loads we have tried. We are in process of revising these numbers.
|
||||
|
||||
### Workspaces
|
||||
|
||||
These numbers are for each of the 3 regions. We recommend that you use a separate node pool for user Workspaces.
|
||||
|
||||
| Users | Node capacity | Replicas | GCP | AWS | Azure |
|
||||
|-------------|----------------------|-------------------------------|------------------|--------------|-------------------|
|
||||
| Up to 3,000 | 8 vCPU, 32 GB memory | 256 nodes, 12 workspaces each | `t2d-standard-8` | `m5.2xlarge` | `Standard_D8s_v3` |
|
||||
|
||||
**Footnotes**:
|
||||
|
||||
- Assumed that a workspace user needs 2 GB memory to perform
|
||||
- Maximum number of Kubernetes workspace pods per node: 256
|
||||
- As workspace nodes can be distributed between regions, on-premises networks
|
||||
and cloud areas, consider different namespaces in favor of zero-trust or
|
||||
multi-cloud deployments.
|
||||
|
||||
### Database nodes
|
||||
|
||||
We conducted our test using the `db-custom-16-61440` tier on Google Cloud SQL.
|
||||
|
||||
**Footnotes**:
|
||||
|
||||
- This database tier was only just able to keep up with 600 concurrent builds in our tests.
|
||||
@@ -220,6 +220,8 @@ For sizing recommendations, see the below reference architectures:
|
||||
|
||||
- [Up to 3,000 users](3k-users.md)
|
||||
|
||||
- DRAFT: [Up to 10,000 users](10k-users.md)
|
||||
|
||||
### AWS Instance Types
|
||||
|
||||
For production AWS deployments, we recommend using non-burstable instance types,
|
||||
|
||||
@@ -36,7 +36,7 @@ We track the following resources:
|
||||
| Template<br><i>write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>active_version_id</td><td>true</td></tr><tr><td>activity_bump</td><td>true</td></tr><tr><td>allow_user_autostart</td><td>true</td></tr><tr><td>allow_user_autostop</td><td>true</td></tr><tr><td>allow_user_cancel_workspace_jobs</td><td>true</td></tr><tr><td>autostart_block_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_weeks</td><td>true</td></tr><tr><td>cors_behavior</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>default_ttl</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deprecated</td><td>true</td></tr><tr><td>description</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>failure_ttl</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>max_port_sharing_level</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_display_name</td><td>false</td></tr><tr><td>organization_icon</td><td>false</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>organization_name</td><td>false</td></tr><tr><td>provisioner</td><td>true</td></tr><tr><td>require_active_version</td><td>true</td></tr><tr><td>time_til_dormant</td><td>true</td></tr><tr><td>time_til_dormant_autodelete</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>use_classic_parameter_flow</td><td>true</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
| TemplateVersion<br><i>create, write</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>archived</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>external_auth_providers</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>true</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>message</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>readme</td><td>true</td></tr><tr><td>source_example_id</td><td>false</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
|
||||
| User<br><i>create, write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>avatar_url</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>email</td><td>true</td></tr><tr><td>github_com_user_id</td><td>false</td></tr><tr><td>hashed_one_time_passcode</td><td>false</td></tr><tr><td>hashed_password</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>is_system</td><td>true</td></tr><tr><td>last_seen_at</td><td>false</td></tr><tr><td>login_type</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>one_time_passcode_expires_at</td><td>true</td></tr><tr><td>quiet_hours_schedule</td><td>true</td></tr><tr><td>rbac_roles</td><td>true</td></tr><tr><td>status</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceBuild<br><i>start, stop</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>build_number</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>daily_cost</td><td>false</td></tr><tr><td>deadline</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>initiator_by_avatar_url</td><td>false</td></tr><tr><td>initiator_by_name</td><td>false</td></tr><tr><td>initiator_by_username</td><td>false</td></tr><tr><td>initiator_id</td><td>false</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>max_deadline</td><td>false</td></tr><tr><td>provisioner_state</td><td>false</td></tr><tr><td>reason</td><td>false</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>template_version_preset_id</td><td>false</td></tr><tr><td>transition</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>workspace_id</td><td>false</td></tr></tbody></table> |
|
||||
| WorkspaceBuild<br><i>start, stop</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>ai_task_sidebar_app_id</td><td>false</td></tr><tr><td>build_number</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>daily_cost</td><td>false</td></tr><tr><td>deadline</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>initiator_by_avatar_url</td><td>false</td></tr><tr><td>initiator_by_name</td><td>false</td></tr><tr><td>initiator_by_username</td><td>false</td></tr><tr><td>initiator_id</td><td>false</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>max_deadline</td><td>false</td></tr><tr><td>provisioner_state</td><td>false</td></tr><tr><td>reason</td><td>false</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>template_version_preset_id</td><td>false</td></tr><tr><td>transition</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>workspace_id</td><td>false</td></tr></tbody></table> |
|
||||
| WorkspaceProxy<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>derp_enabled</td><td>true</td></tr><tr><td>derp_only</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>region_id</td><td>true</td></tr><tr><td>token_hashed_secret</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>url</td><td>true</td></tr><tr><td>version</td><td>true</td></tr><tr><td>wildcard_hostname</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceTable<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>automatic_updates</td><td>true</td></tr><tr><td>autostart_schedule</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deleting_at</td><td>true</td></tr><tr><td>dormant_at</td><td>true</td></tr><tr><td>favorite</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>next_start_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>ttl</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
|
||||
@@ -151,36 +151,6 @@ Should you wish to purge these records, it is safe to do so. This can only be do
|
||||
directly against the `audit_logs` table in the database. We advise users to only purge old records (>1yr)
|
||||
and in accordance with your compliance requirements.
|
||||
|
||||
### Maintenance Procedures for the Audit Logs Table
|
||||
|
||||
> [!NOTE]
|
||||
> `VACUUM FULL` acquires an exclusive lock on the table, blocking all reads and writes. For more information, see the [PostgreSQL VACUUM documentation](https://www.postgresql.org/docs/current/sql-vacuum.html).
|
||||
|
||||
You may choose to run a `VACUUM` or `VACUUM FULL` operation on the audit logs table to reclaim disk space. If you choose to run the `FULL` operation, consider the following when doing so:
|
||||
|
||||
- **Run during a planned maintenance window** to ensure ample time for the operation to complete and minimize impact to users
|
||||
- **Stop all running instances of `coderd`** to prevent connection errors while the table is locked. The actual steps for this will depend on your particular deployment setup. For example, if your `coderd` deployment is running on Kubernetes:
|
||||
|
||||
```bash
|
||||
kubectl scale deployment coder --replicas=0 -n coder
|
||||
```
|
||||
|
||||
- **Terminate lingering connections** before running the `VACUUM` operation to ensure it starts immediately
|
||||
|
||||
```sql
|
||||
SELECT pg_terminate_backend(pg_stat_activity.pid)
|
||||
FROM pg_stat_activity
|
||||
WHERE pg_stat_activity.datname = 'coder' AND pid <> pg_backend_pid();
|
||||
```
|
||||
|
||||
- **Only `coderd` needs to scale down** - external provisioner daemons, workspace proxies, and workspace agents don't connect to the database directly.
|
||||
|
||||
After the vacuum completes, scale coderd back up:
|
||||
|
||||
```bash
|
||||
kubectl scale deployment coder --replicas=<ORIGINAL_REPLICA_COUNT> -n coder
|
||||
```
|
||||
|
||||
### Backup/Archive
|
||||
|
||||
Consider exporting or archiving these records before deletion:
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user