Compare commits
182 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 861893e90c | |||
| e95960f7f8 | |||
| ea058c1a28 | |||
| 293336007a | |||
| f486b4b566 | |||
| 1c71fd69f6 | |||
| 948fd0fc06 | |||
| 2abe55549c | |||
| 7860b99597 | |||
| 5945febf06 | |||
| 22d4539a7a | |||
| 34d9392e37 | |||
| c316d0a3e7 | |||
| eec0b299e8 | |||
| c5619746d1 | |||
| a621c3cb13 | |||
| 0ad2f9ecd7 | |||
| d412972cd5 | |||
| 607c25b07e | |||
| bde772cfa3 | |||
| 31ad3cdd0c | |||
| 1dec6da358 | |||
| f95ae63c96 | |||
| 60793aa277 | |||
| 816d99e46c | |||
| 256284b7fe | |||
| 2bdacae5f5 | |||
| 4b5ec8a9a4 | |||
| b0c6a6dc25 | |||
| 5fb644a6cd | |||
| 12083441e0 | |||
| 52dad56462 | |||
| 360df1d84f | |||
| 8bb80b060e | |||
| 1a87e74574 | |||
| bb97ba727f | |||
| f509c841cf | |||
| 7e8559aac0 | |||
| b65c0766d2 | |||
| ff687aa780 | |||
| d4cfb24a4a | |||
| 344d11fa22 | |||
| 59cec5be65 | |||
| 7043e773cf | |||
| 0cfa03718e | |||
| 0252205374 | |||
| 6248520130 | |||
| edee917d88 | |||
| 67da4e8b56 | |||
| bcb5b43aa7 | |||
| 6f3385d5e4 | |||
| 6c097797a1 | |||
| 12372c4b1e | |||
| 21bc185254 | |||
| 0bafc05c37 | |||
| 2b9baffdcb | |||
| 358f521bbb | |||
| 2b0535b83f | |||
| 39093dbd61 | |||
| 7a3a228377 | |||
| ca234f346d | |||
| dea451de41 | |||
| 173299fcec | |||
| 6364cfa360 | |||
| e161083053 | |||
| a51eb40dca | |||
| d204e6fb84 | |||
| c6638f5547 | |||
| fb154cb60a | |||
| 900f6ef576 | |||
| 9cce241202 | |||
| b9fd9bc0ca | |||
| dbc0daa64b | |||
| 54a7ec4b5b | |||
| 5194cc8050 | |||
| 24ab5205d2 | |||
| ab28ecde88 | |||
| bef7eb9dcc | |||
| bf639d0016 | |||
| 83f2bb15c8 | |||
| 4f1ddeeaad | |||
| 77006f241b | |||
| 4e1cedf8fd | |||
| 4e365e59b6 | |||
| d140920248 | |||
| 3353e687e7 | |||
| 2bac4eb739 | |||
| 15a2bab1cd | |||
| 1c4d8fafc7 | |||
| b0b9ea6fbf | |||
| 98587cfc03 | |||
| d2787df442 | |||
| 1dec1ec4ad | |||
| d2f33932c0 | |||
| 4057363f78 | |||
| 43b8df86c1 | |||
| 4f34452bcc | |||
| 93e823931b | |||
| 6c16794173 | |||
| 119d436071 | |||
| 9613e41d21 | |||
| 947b390c5a | |||
| 6336fee3a7 | |||
| 974ca3eda6 | |||
| 20797347b4 | |||
| adcdbfd562 | |||
| 1e274063d4 | |||
| 393b3874ac | |||
| 3c69d683f4 | |||
| 0a7a3da178 | |||
| bf076fb7ee | |||
| 2ed4c7e6df | |||
| 37a8e61ea2 | |||
| 4d84d42e02 | |||
| 89301f62c5 | |||
| 1e52d15719 | |||
| 058e027e9d | |||
| fa8fceaa8f | |||
| 50823a28fd | |||
| e9b66a8083 | |||
| b776a14b46 | |||
| 7825c02876 | |||
| a4fc6dcf90 | |||
| 6a783fc5c7 | |||
| 64e0bfa880 | |||
| 065266412a | |||
| de4ff78cd1 | |||
| e6f0a1b2f6 | |||
| e2cbf03f85 | |||
| ceb417f8ba | |||
| 67044d80a0 | |||
| 381c55a97a | |||
| b0f35316da | |||
| efdaaa2c8f | |||
| e5f64eb21d | |||
| 1069ce6e19 | |||
| 9bbe3c6af9 | |||
| d700f9ebc4 | |||
| a955de906a | |||
| 051ed34580 | |||
| 203899718f | |||
| ccb5b83c19 | |||
| 00d6f15e7c | |||
| d23f5ea86f | |||
| e857060010 | |||
| db343a9885 | |||
| e8d6016807 | |||
| 911d734df9 | |||
| 0f6fbe7736 | |||
| 3fcd8c6128 | |||
| 02a80eac2e | |||
| c8335fdc54 | |||
| cfdbd5251a | |||
| 92a6d6c2c0 | |||
| d9ec892b9a | |||
| c664e4f72d | |||
| 385554dff8 | |||
| fb027da8bb | |||
| 31c1279202 | |||
| dcdca814d6 | |||
| 873e054be0 | |||
| 4c0c621f2a | |||
| f016d9e505 | |||
| 1c4dd78b05 | |||
| e82edf1b6b | |||
| bab99db9e7 | |||
| 2ee54b0af1 | |||
| d737f8c104 | |||
| f8eea54e97 | |||
| 90c11f3386 | |||
| 81a928915c | |||
| 4a3304fc38 | |||
| a5f3acac2f | |||
| 63563e57db | |||
| b40ebfb7e8 | |||
| 06cfe2705a | |||
| c247dc04a7 | |||
| b12b389455 | |||
| ca1016c6ca | |||
| 65fb0e22a8 | |||
| 5e7b3c3c28 | |||
| 2ed9e7fa6d |
@@ -4,10 +4,7 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.25.6"
|
||||
use-preinstalled-go:
|
||||
description: "Whether to use preinstalled Go."
|
||||
default: "false"
|
||||
default: "1.25.7"
|
||||
use-cache:
|
||||
description: "Whether to use the cache."
|
||||
default: "true"
|
||||
@@ -15,9 +12,9 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5.6.0
|
||||
with:
|
||||
go-version: ${{ inputs.use-preinstalled-go == 'false' && inputs.version || '' }}
|
||||
go-version: ${{ inputs.version }}
|
||||
cache: ${{ inputs.use-cache }}
|
||||
|
||||
- name: Install gotestsum
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.14.1
|
||||
terraform_version: 1.14.5
|
||||
terraform_wrapper: false
|
||||
|
||||
+25
-21
@@ -35,7 +35,7 @@ jobs:
|
||||
tailnet-integration: ${{ steps.filter.outputs.tailnet-integration }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -157,7 +157,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -247,7 +247,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -272,7 +272,7 @@ jobs:
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -329,7 +329,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -381,7 +381,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -422,10 +422,6 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
# Runners have Go baked-in and Go will automatically
|
||||
# download the toolchain configured in go.mod, so we don't
|
||||
# need to reinstall it. It's faster on Windows runners.
|
||||
use-preinstalled-go: ${{ runner.os == 'Windows' }}
|
||||
use-cache: true
|
||||
|
||||
- name: Setup Terraform
|
||||
@@ -489,6 +485,14 @@ jobs:
|
||||
# macOS will output "The default interactive shell is now zsh" intermittently in CI.
|
||||
touch ~/.bash_profile && echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bash_profile
|
||||
|
||||
- name: Increase PTY limit (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
shell: bash
|
||||
run: |
|
||||
# Increase PTY limit to avoid exhaustion during tests.
|
||||
# Default is 511; 999 is the maximum value on CI runner.
|
||||
sudo sysctl -w kern.tty.ptmx_max=999
|
||||
|
||||
- name: Test with PostgreSQL Database (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
uses: ./.github/actions/test-go-pg
|
||||
@@ -578,7 +582,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -640,7 +644,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -712,7 +716,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -739,7 +743,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -772,7 +776,7 @@ jobs:
|
||||
name: ${{ matrix.variant.name }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -852,7 +856,7 @@ jobs:
|
||||
if: needs.changes.outputs.site == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -933,7 +937,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1005,7 +1009,7 @@ jobs:
|
||||
if: always()
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1120,7 +1124,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1175,7 +1179,7 @@ jobs:
|
||||
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1572,7 +1576,7 @@ jobs:
|
||||
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
packages: write # to retag image as dogfood
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
needs: deploy
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ jobs:
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -58,11 +58,11 @@ jobs:
|
||||
run: mkdir base-build-context
|
||||
|
||||
- name: Install depot.dev CLI
|
||||
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
|
||||
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
|
||||
|
||||
# This uses OIDC authentication, so no auth variables are required.
|
||||
- name: Build base Docker image via depot.dev
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
|
||||
with:
|
||||
project: wl5hnrrkns
|
||||
context: base-build-context
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
BRANCH_NAME: ${{ steps.branch-name.outputs.current_branch }}
|
||||
|
||||
- name: Set up Depot CLI
|
||||
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
|
||||
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
@@ -88,7 +88,7 @@ jobs:
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and push Non-Nix image
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
|
||||
with:
|
||||
project: b4q6ltmpzh
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
@@ -125,7 +125,7 @@ jobs:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -64,11 +64,6 @@ jobs:
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
# Runners have Go baked-in and Go will automatically
|
||||
# download the toolchain configured in go.mod, so we don't
|
||||
# need to reinstall it. It's faster on Windows runners.
|
||||
use-preinstalled-go: ${{ runner.os == 'Windows' }}
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -184,7 +184,7 @@ jobs:
|
||||
pull-requests: write # needed for commenting on PRs
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -228,7 +228,7 @@ jobs:
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -288,7 +288,7 @@ jobs:
|
||||
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -158,7 +158,7 @@ jobs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -386,12 +386,12 @@ jobs:
|
||||
|
||||
- name: Install depot.dev CLI
|
||||
if: steps.image-base-tag.outputs.tag != ''
|
||||
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
|
||||
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1
|
||||
|
||||
# This uses OIDC authentication, so no auth variables are required.
|
||||
- name: Build base Docker image via depot.dev
|
||||
if: steps.image-base-tag.outputs.tag != ''
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
|
||||
with:
|
||||
project: wl5hnrrkns
|
||||
context: base-build-context
|
||||
@@ -796,7 +796,7 @@ jobs:
|
||||
# TODO: skip this if it's not a new release (i.e. a backport). This is
|
||||
# fine right now because it just makes a PR that we can close.
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -872,7 +872,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -965,7 +965,7 @@ jobs:
|
||||
if: ${{ !inputs.dry_run }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
echo "image=$(cat "$image_job")" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
uses: aquasecurity/trivy-action@c1824fd6edce30d7ab345a9989de00bbd46ef284 # v0.34.0
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
|
||||
@@ -18,12 +18,12 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: stale
|
||||
uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
|
||||
uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ jobs:
|
||||
pull-requests: write # required to post PR review comments by the action
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -98,3 +98,6 @@ AGENTS.local.md
|
||||
|
||||
# Ignore plans written by AI agents.
|
||||
PLAN.md
|
||||
|
||||
# Ignore any dev licenses
|
||||
license.txt
|
||||
|
||||
@@ -854,7 +854,7 @@ enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged
|
||||
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
# -C sets the directory for the go run command
|
||||
go run -C ./scripts/apitypings main.go > $@
|
||||
(cd site/ && pnpm exec biome format --write src/api/typesGenerated.ts)
|
||||
./scripts/biome_format.sh src/api/typesGenerated.ts
|
||||
touch "$@"
|
||||
|
||||
site/e2e/provisionerGenerated.ts: site/node_modules/.installed provisionerd/proto/provisionerd.pb.go provisionersdk/proto/provisioner.pb.go
|
||||
@@ -863,7 +863,7 @@ site/e2e/provisionerGenerated.ts: site/node_modules/.installed provisionerd/prot
|
||||
|
||||
site/src/theme/icons.json: site/node_modules/.installed $(wildcard scripts/gensite/*) $(wildcard site/static/icon/*)
|
||||
go run ./scripts/gensite/ -icons "$@"
|
||||
(cd site/ && pnpm exec biome format --write src/theme/icons.json)
|
||||
./scripts/biome_format.sh src/theme/icons.json
|
||||
touch "$@"
|
||||
|
||||
examples/examples.gen.json: scripts/examplegen/main.go examples/examples.go $(shell find ./examples/templates)
|
||||
@@ -901,12 +901,12 @@ codersdk/apikey_scopes_gen.go: scripts/apikeyscopesgen/main.go coderd/rbac/scope
|
||||
|
||||
site/src/api/rbacresourcesGenerated.ts: site/node_modules/.installed scripts/typegen/codersdk.gotmpl scripts/typegen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
go run scripts/typegen/main.go rbac typescript > "$@"
|
||||
(cd site/ && pnpm exec biome format --write src/api/rbacresourcesGenerated.ts)
|
||||
./scripts/biome_format.sh src/api/rbacresourcesGenerated.ts
|
||||
touch "$@"
|
||||
|
||||
site/src/api/countriesGenerated.ts: site/node_modules/.installed scripts/typegen/countries.tstmpl scripts/typegen/main.go codersdk/countries.go
|
||||
go run scripts/typegen/main.go countries > "$@"
|
||||
(cd site/ && pnpm exec biome format --write src/api/countriesGenerated.ts)
|
||||
./scripts/biome_format.sh src/api/countriesGenerated.ts
|
||||
touch "$@"
|
||||
|
||||
scripts/metricsdocgen/generated_metrics: $(GO_SRC_FILES)
|
||||
@@ -950,11 +950,11 @@ coderd/apidoc/.gen: \
|
||||
touch "$@"
|
||||
|
||||
docs/manifest.json: site/node_modules/.installed coderd/apidoc/.gen docs/reference/cli/index.md
|
||||
(cd site/ && pnpm exec biome format --write ../docs/manifest.json)
|
||||
./scripts/biome_format.sh ../docs/manifest.json
|
||||
touch "$@"
|
||||
|
||||
coderd/apidoc/swagger.json: site/node_modules/.installed coderd/apidoc/.gen
|
||||
(cd site/ && pnpm exec biome format --write ../coderd/apidoc/swagger.json)
|
||||
./scripts/biome_format.sh ../coderd/apidoc/swagger.json
|
||||
touch "$@"
|
||||
|
||||
update-golden-files:
|
||||
@@ -999,11 +999,19 @@ enterprise/tailnet/testdata/.gen-golden: $(wildcard enterprise/tailnet/testdata/
|
||||
touch "$@"
|
||||
|
||||
helm/coder/tests/testdata/.gen-golden: $(wildcard helm/coder/tests/testdata/*.yaml) $(wildcard helm/coder/tests/testdata/*.golden) $(GO_SRC_FILES) $(wildcard helm/coder/tests/*_test.go)
|
||||
TZ=UTC go test ./helm/coder/tests -run=TestUpdateGoldenFiles -update
|
||||
if command -v helm >/dev/null 2>&1; then
|
||||
TZ=UTC go test ./helm/coder/tests -run=TestUpdateGoldenFiles -update
|
||||
else
|
||||
echo "WARNING: helm not found; skipping helm/coder golden generation" >&2
|
||||
fi
|
||||
touch "$@"
|
||||
|
||||
helm/provisioner/tests/testdata/.gen-golden: $(wildcard helm/provisioner/tests/testdata/*.yaml) $(wildcard helm/provisioner/tests/testdata/*.golden) $(GO_SRC_FILES) $(wildcard helm/provisioner/tests/*_test.go)
|
||||
TZ=UTC go test ./helm/provisioner/tests -run=TestUpdateGoldenFiles -update
|
||||
if command -v helm >/dev/null 2>&1; then
|
||||
TZ=UTC go test ./helm/provisioner/tests -run=TestUpdateGoldenFiles -update
|
||||
else
|
||||
echo "WARNING: helm not found; skipping helm/provisioner golden generation" >&2
|
||||
fi
|
||||
touch "$@"
|
||||
|
||||
coderd/.gen-golden: $(wildcard coderd/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard coderd/*_test.go)
|
||||
|
||||
+18
-3
@@ -41,6 +41,7 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentfiles"
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/agent/agentscripts"
|
||||
"github.com/coder/coder/v2/agent/agentsocket"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
@@ -111,6 +112,12 @@ type Client interface {
|
||||
ConnectRPC28(ctx context.Context) (
|
||||
proto.DRPCAgentClient28, tailnetproto.DRPCTailnetClient28, error,
|
||||
)
|
||||
// ConnectRPC28WithRole is like ConnectRPC28 but sends an explicit
|
||||
// role query parameter to the server. The workspace agent should
|
||||
// use role "agent" to enable connection monitoring.
|
||||
ConnectRPC28WithRole(ctx context.Context, role string) (
|
||||
proto.DRPCAgentClient28, tailnetproto.DRPCTailnetClient28, error,
|
||||
)
|
||||
tailnet.DERPMapRewriter
|
||||
agentsdk.RefreshableSessionTokenProvider
|
||||
}
|
||||
@@ -296,7 +303,8 @@ type agent struct {
|
||||
containerAPIOptions []agentcontainers.Option
|
||||
containerAPI *agentcontainers.API
|
||||
|
||||
filesAPI *agentfiles.API
|
||||
filesAPI *agentfiles.API
|
||||
processAPI *agentproc.API
|
||||
|
||||
socketServerEnabled bool
|
||||
socketPath string
|
||||
@@ -369,6 +377,7 @@ func (a *agent) init() {
|
||||
a.containerAPI = agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
|
||||
|
||||
a.filesAPI = agentfiles.NewAPI(a.logger.Named("files"), a.filesystem)
|
||||
a.processAPI = agentproc.NewAPI(a.logger.Named("processes"), a.execer)
|
||||
|
||||
a.reconnectingPTYServer = reconnectingpty.NewServer(
|
||||
a.logger.Named("reconnecting-pty"),
|
||||
@@ -997,8 +1006,10 @@ func (a *agent) run() (retErr error) {
|
||||
return xerrors.Errorf("refresh token: %w", err)
|
||||
}
|
||||
|
||||
// ConnectRPC returns the dRPC connection we use for the Agent and Tailnet v2+ APIs
|
||||
aAPI, tAPI, err := a.client.ConnectRPC28(a.hardCtx)
|
||||
// ConnectRPC returns the dRPC connection we use for the Agent and Tailnet v2+ APIs.
|
||||
// We pass role "agent" to enable connection monitoring on the server, which tracks
|
||||
// the agent's connectivity state (first_connected_at, last_connected_at, disconnected_at).
|
||||
aAPI, tAPI, err := a.client.ConnectRPC28WithRole(a.hardCtx, "agent")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2022,6 +2033,10 @@ func (a *agent) Close() error {
|
||||
a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
|
||||
}
|
||||
|
||||
if err := a.processAPI.Close(); err != nil {
|
||||
a.logger.Error(a.hardCtx, "process API close", slog.Error(err))
|
||||
}
|
||||
|
||||
if a.boundaryLogProxy != nil {
|
||||
err = a.boundaryLogProxy.Close()
|
||||
if err != nil {
|
||||
|
||||
@@ -29,6 +29,7 @@ func (api *API) Routes() http.Handler {
|
||||
|
||||
r.Post("/list-directory", api.HandleLS)
|
||||
r.Get("/read-file", api.HandleReadFile)
|
||||
r.Get("/read-file-lines", api.HandleReadFileLines)
|
||||
r.Post("/write-file", api.HandleWriteFile)
|
||||
r.Post("/edit-files", api.HandleEditFiles)
|
||||
|
||||
|
||||
+283
-7
@@ -10,11 +10,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/icholy/replace"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/text/transform"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
@@ -23,6 +22,22 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
)
|
||||
|
||||
// ReadFileLinesResponse is the JSON response for the line-based file reader.
|
||||
type ReadFileLinesResponse struct {
|
||||
// Success indicates whether the read was successful.
|
||||
Success bool `json:"success"`
|
||||
// FileSize is the original file size in bytes.
|
||||
FileSize int64 `json:"file_size,omitempty"`
|
||||
// TotalLines is the total number of lines in the file.
|
||||
TotalLines int `json:"total_lines,omitempty"`
|
||||
// LinesRead is the count of lines returned in this response.
|
||||
LinesRead int `json:"lines_read,omitempty"`
|
||||
// Content is the line-numbered file content.
|
||||
Content string `json:"content,omitempty"`
|
||||
// Error is the error message when success is false.
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type HTTPResponseCode = int
|
||||
|
||||
func (api *API) HandleReadFile(rw http.ResponseWriter, r *http.Request) {
|
||||
@@ -103,6 +118,166 @@ func (api *API) streamFile(ctx context.Context, rw http.ResponseWriter, path str
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (api *API) HandleReadFileLines(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
query := r.URL.Query()
|
||||
parser := httpapi.NewQueryParamParser().RequiredNotEmpty("path")
|
||||
path := parser.String(query, "", "path")
|
||||
offset := parser.PositiveInt64(query, 1, "offset")
|
||||
limit := parser.PositiveInt64(query, 0, "limit")
|
||||
maxFileSize := parser.PositiveInt64(query, workspacesdk.DefaultMaxFileSize, "max_file_size")
|
||||
maxLineBytes := parser.PositiveInt64(query, workspacesdk.DefaultMaxLineBytes, "max_line_bytes")
|
||||
maxResponseLines := parser.PositiveInt64(query, workspacesdk.DefaultMaxResponseLines, "max_response_lines")
|
||||
maxResponseBytes := parser.PositiveInt64(query, workspacesdk.DefaultMaxResponseBytes, "max_response_bytes")
|
||||
parser.ErrorExcessParams(query)
|
||||
if len(parser.Errors) > 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Query parameters have invalid values.",
|
||||
Validations: parser.Errors,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
resp := api.readFileLines(ctx, path, offset, limit, workspacesdk.ReadFileLinesLimits{
|
||||
MaxFileSize: maxFileSize,
|
||||
MaxLineBytes: int(maxLineBytes),
|
||||
MaxResponseLines: int(maxResponseLines),
|
||||
MaxResponseBytes: int(maxResponseBytes),
|
||||
})
|
||||
httpapi.Write(ctx, rw, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
func (api *API) readFileLines(_ context.Context, path string, offset, limit int64, limits workspacesdk.ReadFileLinesLimits) ReadFileLinesResponse {
|
||||
errResp := func(msg string) ReadFileLinesResponse {
|
||||
return ReadFileLinesResponse{Success: false, Error: msg}
|
||||
}
|
||||
|
||||
if !filepath.IsAbs(path) {
|
||||
return errResp(fmt.Sprintf("file path must be absolute: %q", path))
|
||||
}
|
||||
|
||||
f, err := api.filesystem.Open(path)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return errResp(fmt.Sprintf("file does not exist: %s", path))
|
||||
}
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
return errResp(fmt.Sprintf("permission denied: %s", path))
|
||||
}
|
||||
return errResp(fmt.Sprintf("open file: %s", err))
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return errResp(fmt.Sprintf("stat file: %s", err))
|
||||
}
|
||||
|
||||
if stat.IsDir() {
|
||||
return errResp(fmt.Sprintf("not a file: %s", path))
|
||||
}
|
||||
|
||||
fileSize := stat.Size()
|
||||
if fileSize > limits.MaxFileSize {
|
||||
return errResp(fmt.Sprintf(
|
||||
"file is %d bytes which exceeds the maximum of %d bytes. Use grep, sed, or awk to extract the content you need, or use offset and limit to read a portion.",
|
||||
fileSize, limits.MaxFileSize,
|
||||
))
|
||||
}
|
||||
|
||||
// Read the entire file (up to MaxFileSize).
|
||||
data, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return errResp(fmt.Sprintf("read file: %s", err))
|
||||
}
|
||||
|
||||
// Split into lines.
|
||||
content := string(data)
|
||||
// Handle empty file.
|
||||
if content == "" {
|
||||
return ReadFileLinesResponse{
|
||||
Success: true,
|
||||
FileSize: fileSize,
|
||||
TotalLines: 0,
|
||||
LinesRead: 0,
|
||||
Content: "",
|
||||
}
|
||||
}
|
||||
|
||||
lines := strings.Split(content, "\n")
|
||||
totalLines := len(lines)
|
||||
|
||||
// offset is 1-based line number.
|
||||
if offset < 1 {
|
||||
offset = 1
|
||||
}
|
||||
if offset > int64(totalLines) {
|
||||
return errResp(fmt.Sprintf(
|
||||
"offset %d is beyond the file length of %d lines",
|
||||
offset, totalLines,
|
||||
))
|
||||
}
|
||||
|
||||
// Default limit.
|
||||
if limit <= 0 {
|
||||
limit = int64(limits.MaxResponseLines)
|
||||
}
|
||||
|
||||
startIdx := int(offset - 1) // convert to 0-based
|
||||
endIdx := startIdx + int(limit)
|
||||
if endIdx > totalLines {
|
||||
endIdx = totalLines
|
||||
}
|
||||
|
||||
var numbered []string
|
||||
totalBytesAccumulated := 0
|
||||
|
||||
for i := startIdx; i < endIdx; i++ {
|
||||
line := lines[i]
|
||||
|
||||
// Per-line truncation.
|
||||
if len(line) > limits.MaxLineBytes {
|
||||
line = line[:limits.MaxLineBytes] + "... [truncated]"
|
||||
}
|
||||
|
||||
// Format with 1-based line number.
|
||||
numberedLine := fmt.Sprintf("%d\t%s", i+1, line)
|
||||
lineBytes := len(numberedLine)
|
||||
|
||||
// Check total byte budget.
|
||||
newTotal := totalBytesAccumulated + lineBytes
|
||||
if len(numbered) > 0 {
|
||||
newTotal++ // account for \n joiner
|
||||
}
|
||||
if newTotal > limits.MaxResponseBytes {
|
||||
return errResp(fmt.Sprintf(
|
||||
"output would exceed %d bytes. Read less at a time using offset and limit parameters.",
|
||||
limits.MaxResponseBytes,
|
||||
))
|
||||
}
|
||||
|
||||
// Check line count.
|
||||
if len(numbered) >= limits.MaxResponseLines {
|
||||
return errResp(fmt.Sprintf(
|
||||
"output would exceed %d lines. Read less at a time using offset and limit parameters.",
|
||||
limits.MaxResponseLines,
|
||||
))
|
||||
}
|
||||
|
||||
numbered = append(numbered, numberedLine)
|
||||
totalBytesAccumulated = newTotal
|
||||
}
|
||||
|
||||
return ReadFileLinesResponse{
|
||||
Success: true,
|
||||
FileSize: fileSize,
|
||||
TotalLines: totalLines,
|
||||
LinesRead: len(numbered),
|
||||
Content: strings.Join(numbered, "\n"),
|
||||
}
|
||||
}
|
||||
|
||||
func (api *API) HandleWriteFile(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
@@ -245,9 +420,21 @@ func (api *API) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
return http.StatusBadRequest, xerrors.Errorf("open %s: not a file", path)
|
||||
}
|
||||
|
||||
transforms := make([]transform.Transformer, len(edits))
|
||||
for i, edit := range edits {
|
||||
transforms[i] = replace.String(edit.Search, edit.Replace)
|
||||
data, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, xerrors.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
content := string(data)
|
||||
|
||||
for _, edit := range edits {
|
||||
var ok bool
|
||||
content, ok = fuzzyReplace(content, edit.Search, edit.Replace)
|
||||
if !ok {
|
||||
api.logger.Warn(ctx, "edit search string not found, skipping",
|
||||
slog.F("path", path),
|
||||
slog.F("search_preview", truncate(edit.Search, 64)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Create an adjacent file to ensure it will be on the same device and can be
|
||||
@@ -258,8 +445,7 @@ func (api *API) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
}
|
||||
defer tmpfile.Close()
|
||||
|
||||
_, err = io.Copy(tmpfile, replace.Chain(f, transforms...))
|
||||
if err != nil {
|
||||
if _, err := tmpfile.Write([]byte(content)); err != nil {
|
||||
if rerr := api.filesystem.Remove(tmpfile.Name()); rerr != nil {
|
||||
api.logger.Warn(ctx, "unable to clean up temp file", slog.Error(rerr))
|
||||
}
|
||||
@@ -273,3 +459,93 @@ func (api *API) editFile(ctx context.Context, path string, edits []workspacesdk.
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// fuzzyReplace attempts to find `search` inside `content` and replace its first
|
||||
// occurrence with `replace`. It uses a cascading match strategy inspired by
|
||||
// openai/codex's apply_patch:
|
||||
//
|
||||
// 1. Exact substring match (byte-for-byte).
|
||||
// 2. Line-by-line match ignoring trailing whitespace on each line.
|
||||
// 3. Line-by-line match ignoring all leading/trailing whitespace (indentation-tolerant).
|
||||
//
|
||||
// When a fuzzy match is found (passes 2 or 3), the replacement is still applied
|
||||
// at the byte offsets of the original content so that surrounding text (including
|
||||
// indentation of untouched lines) is preserved.
|
||||
//
|
||||
// Returns the (possibly modified) content and a bool indicating whether a match
|
||||
// was found.
|
||||
func fuzzyReplace(content, search, replace string) (string, bool) {
|
||||
// Pass 1 – exact substring (replace all occurrences).
|
||||
if strings.Contains(content, search) {
|
||||
return strings.ReplaceAll(content, search, replace), true
|
||||
}
|
||||
|
||||
// For line-level fuzzy matching we split both content and search into lines.
|
||||
contentLines := strings.SplitAfter(content, "\n")
|
||||
searchLines := strings.SplitAfter(search, "\n")
|
||||
|
||||
// A trailing newline in the search produces an empty final element from
|
||||
// SplitAfter. Drop it so it doesn't interfere with line matching.
|
||||
if len(searchLines) > 0 && searchLines[len(searchLines)-1] == "" {
|
||||
searchLines = searchLines[:len(searchLines)-1]
|
||||
}
|
||||
|
||||
// Pass 2 – trim trailing whitespace on each line.
|
||||
if start, end, ok := seekLines(contentLines, searchLines, func(a, b string) bool {
|
||||
return strings.TrimRight(a, " \t\r\n") == strings.TrimRight(b, " \t\r\n")
|
||||
}); ok {
|
||||
return spliceLines(contentLines, start, end, replace), true
|
||||
}
|
||||
|
||||
// Pass 3 – trim all leading and trailing whitespace (indentation-tolerant).
|
||||
if start, end, ok := seekLines(contentLines, searchLines, func(a, b string) bool {
|
||||
return strings.TrimSpace(a) == strings.TrimSpace(b)
|
||||
}); ok {
|
||||
return spliceLines(contentLines, start, end, replace), true
|
||||
}
|
||||
|
||||
return content, false
|
||||
}
|
||||
|
||||
// seekLines scans contentLines looking for a contiguous subsequence that matches
|
||||
// searchLines according to the provided `eq` function. It returns the start and
|
||||
// end (exclusive) indices into contentLines of the match.
|
||||
func seekLines(contentLines, searchLines []string, eq func(a, b string) bool) (start, end int, ok bool) {
|
||||
if len(searchLines) == 0 {
|
||||
return 0, 0, true
|
||||
}
|
||||
if len(searchLines) > len(contentLines) {
|
||||
return 0, 0, false
|
||||
}
|
||||
outer:
|
||||
for i := 0; i <= len(contentLines)-len(searchLines); i++ {
|
||||
for j, sLine := range searchLines {
|
||||
if !eq(contentLines[i+j], sLine) {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
return i, i + len(searchLines), true
|
||||
}
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
// spliceLines replaces contentLines[start:end] with replacement text, returning
|
||||
// the full content as a single string.
|
||||
func spliceLines(contentLines []string, start, end int, replacement string) string {
|
||||
var b strings.Builder
|
||||
for _, l := range contentLines[:start] {
|
||||
_, _ = b.WriteString(l)
|
||||
}
|
||||
_, _ = b.WriteString(replacement)
|
||||
for _, l := range contentLines[end:] {
|
||||
_, _ = b.WriteString(l)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func truncate(s string, n int) string {
|
||||
if len(s) <= n {
|
||||
return s
|
||||
}
|
||||
return s[:n] + "..."
|
||||
}
|
||||
|
||||
@@ -649,6 +649,106 @@ func TestEditFiles(t *testing.T) {
|
||||
filepath.Join(tmpdir, "file3"): "edited3 3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "TrailingWhitespace",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "trailing-ws"): "foo \nbar\t\t\nbaz"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "trailing-ws"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
Search: "foo\nbar\nbaz",
|
||||
Replace: "replaced",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]string{filepath.Join(tmpdir, "trailing-ws"): "replaced"},
|
||||
},
|
||||
{
|
||||
name: "TabsVsSpaces",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "tabs-vs-spaces"): "\tif true {\n\t\tfoo()\n\t}"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "tabs-vs-spaces"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
// Search uses spaces but file uses tabs.
|
||||
Search: " if true {\n foo()\n }",
|
||||
Replace: "\tif true {\n\t\tbar()\n\t}",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]string{filepath.Join(tmpdir, "tabs-vs-spaces"): "\tif true {\n\t\tbar()\n\t}"},
|
||||
},
|
||||
{
|
||||
name: "DifferentIndentDepth",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "indent-depth"): "\t\t\tdeep()\n\t\t\tnested()"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "indent-depth"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
// Search has wrong indent depth (1 tab instead of 3).
|
||||
Search: "\tdeep()\n\tnested()",
|
||||
Replace: "\t\t\tdeep()\n\t\t\tchanged()",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]string{filepath.Join(tmpdir, "indent-depth"): "\t\t\tdeep()\n\t\t\tchanged()"},
|
||||
},
|
||||
{
|
||||
name: "ExactMatchPreferred",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "exact-preferred"): "hello world"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "exact-preferred"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
Search: "hello world",
|
||||
Replace: "goodbye world",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]string{filepath.Join(tmpdir, "exact-preferred"): "goodbye world"},
|
||||
},
|
||||
{
|
||||
name: "NoMatchStillSucceeds",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "no-match"): "original content"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "no-match"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
Search: "this does not exist in the file",
|
||||
Replace: "whatever",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// File should remain unchanged.
|
||||
expected: map[string]string{filepath.Join(tmpdir, "no-match"): "original content"},
|
||||
},
|
||||
{
|
||||
name: "MixedWhitespaceMultiline",
|
||||
contents: map[string]string{filepath.Join(tmpdir, "mixed-ws"): "func main() {\n\tresult := compute()\n\tfmt.Println(result)\n}"},
|
||||
edits: []workspacesdk.FileEdits{
|
||||
{
|
||||
Path: filepath.Join(tmpdir, "mixed-ws"),
|
||||
Edits: []workspacesdk.FileEdit{
|
||||
{
|
||||
// Search uses spaces, file uses tabs.
|
||||
Search: " result := compute()\n fmt.Println(result)\n",
|
||||
Replace: "\tresult := compute()\n\tlog.Println(result)\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]string{filepath.Join(tmpdir, "mixed-ws"): "func main() {\n\tresult := compute()\n\tlog.Println(result)\n}"},
|
||||
},
|
||||
{
|
||||
name: "MultiError",
|
||||
contents: map[string]string{
|
||||
@@ -737,3 +837,188 @@ func TestEditFiles(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFileLines(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tmpdir := os.TempDir()
|
||||
noPermsFilePath := filepath.Join(tmpdir, "no-perms-lines")
|
||||
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
fs := newTestFs(afero.NewMemMapFs(), func(call, file string) error {
|
||||
if file == noPermsFilePath {
|
||||
return os.ErrPermission
|
||||
}
|
||||
return nil
|
||||
})
|
||||
api := agentfiles.NewAPI(logger, fs)
|
||||
|
||||
dirPath := filepath.Join(tmpdir, "a-directory-lines")
|
||||
err := fs.MkdirAll(dirPath, 0o755)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyFilePath := filepath.Join(tmpdir, "empty-file")
|
||||
err = afero.WriteFile(fs, emptyFilePath, []byte(""), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
basicFilePath := filepath.Join(tmpdir, "basic-file")
|
||||
err = afero.WriteFile(fs, basicFilePath, []byte("line1\nline2\nline3"), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
longLine := string(bytes.Repeat([]byte("x"), 1025))
|
||||
longLineFilePath := filepath.Join(tmpdir, "long-line-file")
|
||||
err = afero.WriteFile(fs, longLineFilePath, []byte(longLine), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
largeFilePath := filepath.Join(tmpdir, "large-file")
|
||||
err = afero.WriteFile(fs, largeFilePath, bytes.Repeat([]byte("x"), 1<<20+1), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
offset int64
|
||||
limit int64
|
||||
expSuccess bool
|
||||
expError string
|
||||
expContent string
|
||||
expTotal int
|
||||
expRead int
|
||||
expSize int64
|
||||
// useCodersdk is set for cases where the handler returns
|
||||
// codersdk.Response (query param validation) instead of ReadFileLinesResponse.
|
||||
useCodersdk bool
|
||||
}{
|
||||
{
|
||||
name: "NoPath",
|
||||
path: "",
|
||||
useCodersdk: true,
|
||||
expError: "is required",
|
||||
},
|
||||
{
|
||||
name: "RelativePath",
|
||||
path: "relative/path",
|
||||
expError: "file path must be absolute",
|
||||
},
|
||||
{
|
||||
name: "NonExistent",
|
||||
path: filepath.Join(tmpdir, "does-not-exist"),
|
||||
expError: "file does not exist",
|
||||
},
|
||||
{
|
||||
name: "IsDir",
|
||||
path: dirPath,
|
||||
expError: "not a file",
|
||||
},
|
||||
{
|
||||
name: "NoPermissions",
|
||||
path: noPermsFilePath,
|
||||
expError: "permission denied",
|
||||
},
|
||||
{
|
||||
name: "EmptyFile",
|
||||
path: emptyFilePath,
|
||||
expSuccess: true,
|
||||
expTotal: 0,
|
||||
expRead: 0,
|
||||
expSize: 0,
|
||||
},
|
||||
{
|
||||
name: "BasicRead",
|
||||
path: basicFilePath,
|
||||
expSuccess: true,
|
||||
expContent: "1\tline1\n2\tline2\n3\tline3",
|
||||
expTotal: 3,
|
||||
expRead: 3,
|
||||
expSize: int64(len("line1\nline2\nline3")),
|
||||
},
|
||||
{
|
||||
name: "Offset2",
|
||||
path: basicFilePath,
|
||||
offset: 2,
|
||||
expSuccess: true,
|
||||
expContent: "2\tline2\n3\tline3",
|
||||
expTotal: 3,
|
||||
expRead: 2,
|
||||
expSize: int64(len("line1\nline2\nline3")),
|
||||
},
|
||||
{
|
||||
name: "Limit1",
|
||||
path: basicFilePath,
|
||||
limit: 1,
|
||||
expSuccess: true,
|
||||
expContent: "1\tline1",
|
||||
expTotal: 3,
|
||||
expRead: 1,
|
||||
expSize: int64(len("line1\nline2\nline3")),
|
||||
},
|
||||
{
|
||||
name: "Offset2Limit1",
|
||||
path: basicFilePath,
|
||||
offset: 2,
|
||||
limit: 1,
|
||||
expSuccess: true,
|
||||
expContent: "2\tline2",
|
||||
expTotal: 3,
|
||||
expRead: 1,
|
||||
expSize: int64(len("line1\nline2\nline3")),
|
||||
},
|
||||
{
|
||||
name: "OffsetBeyondFile",
|
||||
path: basicFilePath,
|
||||
offset: 100,
|
||||
expError: "offset 100 is beyond the file length of 3 lines",
|
||||
},
|
||||
{
|
||||
name: "LongLineTruncation",
|
||||
path: longLineFilePath,
|
||||
expSuccess: true,
|
||||
expContent: "1\t" + string(bytes.Repeat([]byte("x"), 1024)) + "... [truncated]",
|
||||
expTotal: 1,
|
||||
expRead: 1,
|
||||
expSize: 1025,
|
||||
},
|
||||
{
|
||||
name: "LargeFile",
|
||||
path: largeFilePath,
|
||||
expError: "exceeds the maximum",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("/read-file-lines?path=%s&offset=%d&limit=%d", tt.path, tt.offset, tt.limit), nil)
|
||||
api.Routes().ServeHTTP(w, r)
|
||||
|
||||
if tt.useCodersdk {
|
||||
// Query param validation errors return codersdk.Response.
|
||||
require.Equal(t, http.StatusBadRequest, w.Code)
|
||||
require.Contains(t, w.Body.String(), tt.expError)
|
||||
return
|
||||
}
|
||||
|
||||
var resp agentfiles.ReadFileLinesResponse
|
||||
err := json.NewDecoder(w.Body).Decode(&resp)
|
||||
require.NoError(t, err)
|
||||
|
||||
if tt.expSuccess {
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.True(t, resp.Success)
|
||||
require.Equal(t, tt.expContent, resp.Content)
|
||||
require.Equal(t, tt.expTotal, resp.TotalLines)
|
||||
require.Equal(t, tt.expRead, resp.LinesRead)
|
||||
require.Equal(t, tt.expSize, resp.FileSize)
|
||||
} else {
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.False(t, resp.Success)
|
||||
require.Contains(t, resp.Error, tt.expError)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,175 @@
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
)
|
||||
|
||||
// API exposes process-related operations through the agent.
|
||||
type API struct {
|
||||
logger slog.Logger
|
||||
manager *manager
|
||||
}
|
||||
|
||||
// NewAPI creates a new process API handler.
|
||||
func NewAPI(logger slog.Logger, execer agentexec.Execer) *API {
|
||||
return &API{
|
||||
logger: logger,
|
||||
manager: newManager(logger, execer),
|
||||
}
|
||||
}
|
||||
|
||||
// Close shuts down the process manager, killing all running
|
||||
// processes.
|
||||
func (api *API) Close() error {
|
||||
return api.manager.Close()
|
||||
}
|
||||
|
||||
// Routes returns the HTTP handler for process-related routes.
|
||||
func (api *API) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Post("/start", api.handleStartProcess)
|
||||
r.Get("/list", api.handleListProcesses)
|
||||
r.Get("/{id}/output", api.handleProcessOutput)
|
||||
r.Post("/{id}/signal", api.handleSignalProcess)
|
||||
return r
|
||||
}
|
||||
|
||||
// handleStartProcess starts a new process.
|
||||
func (api *API) handleStartProcess(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
var req workspacesdk.StartProcessRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Request body must be valid JSON.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if req.Command == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Command is required.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
proc, err := api.manager.start(req)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to start process.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, workspacesdk.StartProcessResponse{
|
||||
ID: proc.id,
|
||||
Started: true,
|
||||
})
|
||||
}
|
||||
|
||||
// handleListProcesses lists all tracked processes.
|
||||
func (api *API) handleListProcesses(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
infos := api.manager.list()
|
||||
httpapi.Write(ctx, rw, http.StatusOK, workspacesdk.ListProcessesResponse{
|
||||
Processes: infos,
|
||||
})
|
||||
}
|
||||
|
||||
// handleProcessOutput returns the output of a process.
|
||||
func (api *API) handleProcessOutput(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
id := chi.URLParam(r, "id")
|
||||
proc, ok := api.manager.get(id)
|
||||
if !ok {
|
||||
httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{
|
||||
Message: fmt.Sprintf("Process %q not found.", id),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
output, truncated := proc.output()
|
||||
info := proc.info()
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, workspacesdk.ProcessOutputResponse{
|
||||
Output: output,
|
||||
Truncated: truncated,
|
||||
Running: info.Running,
|
||||
ExitCode: info.ExitCode,
|
||||
})
|
||||
}
|
||||
|
||||
// handleSignalProcess sends a signal to a running process.
|
||||
func (api *API) handleSignalProcess(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
id := chi.URLParam(r, "id")
|
||||
|
||||
var req workspacesdk.SignalProcessRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Request body must be valid JSON.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if req.Signal == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Signal is required.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if req.Signal != "kill" && req.Signal != "terminate" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: fmt.Sprintf(
|
||||
"Unsupported signal %q. Use \"kill\" or \"terminate\".",
|
||||
req.Signal,
|
||||
),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if err := api.manager.signal(id, req.Signal); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, errProcessNotFound):
|
||||
httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{
|
||||
Message: fmt.Sprintf("Process %q not found.", id),
|
||||
})
|
||||
case errors.Is(err, errProcessNotRunning):
|
||||
httpapi.Write(ctx, rw, http.StatusConflict, codersdk.Response{
|
||||
Message: fmt.Sprintf(
|
||||
"Process %q is not running.", id,
|
||||
),
|
||||
})
|
||||
default:
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to signal process.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.Response{
|
||||
Message: fmt.Sprintf(
|
||||
"Signal %q sent to process %q.", req.Signal, id,
|
||||
),
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,636 @@
|
||||
package agentproc_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"cdr.dev/slog/v3/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// postStart sends a POST /start request and returns the recorder.
|
||||
func postStart(t *testing.T, handler http.Handler, req workspacesdk.StartProcessRequest) *httptest.ResponseRecorder {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
body, err := json.Marshal(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequestWithContext(ctx, http.MethodPost, "/start", bytes.NewReader(body))
|
||||
handler.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
// getList sends a GET /list request and returns the recorder.
|
||||
func getList(t *testing.T, handler http.Handler) *httptest.ResponseRecorder {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequestWithContext(ctx, http.MethodGet, "/list", nil)
|
||||
handler.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
// getOutput sends a GET /{id}/output request and returns the
|
||||
// recorder.
|
||||
func getOutput(t *testing.T, handler http.Handler, id string) *httptest.ResponseRecorder {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("/%s/output", id), nil)
|
||||
handler.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
// postSignal sends a POST /{id}/signal request and returns
|
||||
// the recorder.
|
||||
func postSignal(t *testing.T, handler http.Handler, id string, req workspacesdk.SignalProcessRequest) *httptest.ResponseRecorder {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
body, err := json.Marshal(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("/%s/signal", id), bytes.NewReader(body))
|
||||
handler.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
// newTestAPI creates a new API with a test logger and default
|
||||
// execer, returning the handler and API.
|
||||
func newTestAPI(t *testing.T) http.Handler {
|
||||
t.Helper()
|
||||
|
||||
logger := slogtest.Make(t, &slogtest.Options{
|
||||
IgnoreErrors: true,
|
||||
}).Leveled(slog.LevelDebug)
|
||||
api := agentproc.NewAPI(logger, agentexec.DefaultExecer)
|
||||
t.Cleanup(func() {
|
||||
_ = api.Close()
|
||||
})
|
||||
return api.Routes()
|
||||
}
|
||||
|
||||
// waitForExit polls the output endpoint until the process is
|
||||
// no longer running or the context expires.
|
||||
func waitForExit(t *testing.T, handler http.Handler, id string) workspacesdk.ProcessOutputResponse {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timed out waiting for process to exit")
|
||||
case <-ticker.C:
|
||||
w := getOutput(t, handler, id)
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
var resp workspacesdk.ProcessOutputResponse
|
||||
err := json.NewDecoder(w.Body).Decode(&resp)
|
||||
require.NoError(t, err)
|
||||
|
||||
if !resp.Running {
|
||||
return resp
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startAndGetID is a helper that starts a process and returns
|
||||
// the process ID.
|
||||
func startAndGetID(t *testing.T, handler http.Handler, req workspacesdk.StartProcessRequest) string {
|
||||
t.Helper()
|
||||
|
||||
w := postStart(t, handler, req)
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
var resp workspacesdk.StartProcessResponse
|
||||
err := json.NewDecoder(w.Body).Decode(&resp)
|
||||
require.NoError(t, err)
|
||||
require.True(t, resp.Started)
|
||||
require.NotEmpty(t, resp.ID)
|
||||
return resp.ID
|
||||
}
|
||||
|
||||
// TestStartProcess exercises the /start endpoint: valid foreground and
// background commands, input validation (empty command, malformed
// JSON), and the WorkDir/Env request options.
func TestStartProcess(t *testing.T) {
	t.Parallel()

	t.Run("ForegroundCommand", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := postStart(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo hello",
		})
		require.Equal(t, http.StatusOK, w.Code)

		var resp workspacesdk.StartProcessResponse
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.True(t, resp.Started)
		require.NotEmpty(t, resp.ID)
	})

	t.Run("BackgroundCommand", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := postStart(t, handler, workspacesdk.StartProcessRequest{
			Command:    "echo background",
			Background: true,
		})
		require.Equal(t, http.StatusOK, w.Code)

		var resp workspacesdk.StartProcessResponse
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.True(t, resp.Started)
		require.NotEmpty(t, resp.ID)
	})

	t.Run("EmptyCommand", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := postStart(t, handler, workspacesdk.StartProcessRequest{
			Command: "",
		})
		require.Equal(t, http.StatusBadRequest, w.Code)

		var resp codersdk.Response
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Contains(t, resp.Message, "Command is required")
	})

	t.Run("MalformedJSON", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
		defer cancel()

		// Bypass postStart so we can send a body that is not
		// valid JSON at all.
		w := httptest.NewRecorder()
		r := httptest.NewRequestWithContext(ctx, http.MethodPost, "/start", strings.NewReader("{invalid json"))
		handler.ServeHTTP(w, r)

		require.Equal(t, http.StatusBadRequest, w.Code)

		var resp codersdk.Response
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Contains(t, resp.Message, "valid JSON")
	})

	t.Run("CustomWorkDir", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		tmpDir := t.TempDir()

		// Write a marker file to verify the command ran in
		// the correct directory. Comparing pwd output is
		// unreliable on Windows where Git Bash returns POSIX
		// paths.
		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "touch marker.txt && ls marker.txt",
			WorkDir: tmpDir,
		})

		resp := waitForExit(t, handler, id)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)
		require.Contains(t, resp.Output, "marker.txt")
	})

	t.Run("CustomEnv", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		// Use a unique env var name to avoid collisions in
		// parallel tests.
		envKey := fmt.Sprintf("TEST_PROC_ENV_%d", time.Now().UnixNano())
		envVal := "custom_value_12345"

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: fmt.Sprintf("printenv %s", envKey),
			Env:     map[string]string{envKey: envVal},
		})

		resp := waitForExit(t, handler, id)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)
		require.Contains(t, strings.TrimSpace(resp.Output), envVal)
	})
}
|
||||
|
||||
// TestListProcesses covers the list endpoint when no processes exist
// and when the list mixes running and exited processes.
func TestListProcesses(t *testing.T) {
	t.Parallel()

	t.Run("NoProcesses", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := getList(t, handler)
		require.Equal(t, http.StatusOK, w.Code)

		var resp workspacesdk.ListProcessesResponse
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		// The list must decode to an empty array, not null.
		require.NotNil(t, resp.Processes)
		require.Empty(t, resp.Processes)
	})

	t.Run("MixedRunningAndExited", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		// Start a process that exits quickly.
		exitedID := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo done",
		})
		waitForExit(t, handler, exitedID)

		// Start a long-running process.
		runningID := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		// List should contain both.
		w := getList(t, handler)
		require.Equal(t, http.StatusOK, w.Code)

		var resp workspacesdk.ListProcessesResponse
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Len(t, resp.Processes, 2)

		// Index by ID since list ordering is not asserted here.
		procMap := make(map[string]workspacesdk.ProcessInfo)
		for _, p := range resp.Processes {
			procMap[p.ID] = p
		}

		exited, ok := procMap[exitedID]
		require.True(t, ok, "exited process should be in list")
		require.False(t, exited.Running)
		require.NotNil(t, exited.ExitCode)

		running, ok := procMap[runningID]
		require.True(t, ok, "running process should be in list")
		require.True(t, running.Running)

		// Clean up the long-running process.
		sw := postSignal(t, handler, runningID, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
		require.Equal(t, http.StatusOK, sw.Code)
	})
}
|
||||
|
||||
// TestProcessOutput covers fetching output for exited, still-running,
// and unknown process IDs.
func TestProcessOutput(t *testing.T) {
	t.Parallel()

	t.Run("ExitedProcess", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo hello-output",
		})

		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)
		require.Contains(t, resp.Output, "hello-output")
	})

	t.Run("RunningProcess", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		// Output for a live process reports Running=true.
		w := getOutput(t, handler, id)
		require.Equal(t, http.StatusOK, w.Code)

		var resp workspacesdk.ProcessOutputResponse
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.True(t, resp.Running)

		// Kill and wait for the process so cleanup does
		// not hang.
		postSignal(
			t, handler, id,
			workspacesdk.SignalProcessRequest{Signal: "kill"},
		)
		waitForExit(t, handler, id)
	})

	t.Run("NonexistentProcess", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := getOutput(t, handler, "nonexistent-id-12345")
		require.Equal(t, http.StatusNotFound, w.Code)

		var resp codersdk.Response
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Contains(t, resp.Message, "not found")
	})
}
|
||||
|
||||
// TestSignalProcess covers kill/terminate of running processes plus
// the error paths: unknown ID, already-exited process, and missing or
// unsupported signal names.
func TestSignalProcess(t *testing.T) {
	t.Parallel()

	t.Run("KillRunning", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		w := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
		require.Equal(t, http.StatusOK, w.Code)

		// Verify the process exits.
		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
	})

	t.Run("TerminateRunning", func(t *testing.T) {
		t.Parallel()

		if runtime.GOOS == "windows" {
			t.Skip("SIGTERM is not supported on Windows")
		}

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		w := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "terminate",
		})
		require.Equal(t, http.StatusOK, w.Code)

		// Verify the process exits.
		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
	})

	t.Run("NonexistentProcess", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)
		w := postSignal(t, handler, "nonexistent-id-12345", workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
		require.Equal(t, http.StatusNotFound, w.Code)
	})

	t.Run("AlreadyExitedProcess", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo done",
		})

		// Wait for exit first.
		waitForExit(t, handler, id)

		// Signaling an exited process should return 409
		// Conflict via the errProcessNotRunning sentinel.
		w := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
		assert.Equal(t, http.StatusConflict, w.Code,
			"expected 409 for signaling exited process, got %d", w.Code)
	})

	t.Run("EmptySignal", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		w := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "",
		})
		require.Equal(t, http.StatusBadRequest, w.Code)

		var resp codersdk.Response
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Contains(t, resp.Message, "Signal is required")

		// Clean up.
		postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
	})

	t.Run("InvalidSignal", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		w := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "SIGFOO",
		})
		require.Equal(t, http.StatusBadRequest, w.Code)

		var resp codersdk.Response
		err := json.NewDecoder(w.Body).Decode(&resp)
		require.NoError(t, err)
		require.Contains(t, resp.Message, "Unsupported signal")

		// Clean up.
		postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
	})
}
|
||||
|
||||
// TestProcessLifecycle exercises full start-to-exit flows: normal and
// non-zero exits, signal-driven termination, output truncation for
// very large output, and stderr capture.
func TestProcessLifecycle(t *testing.T) {
	t.Parallel()

	t.Run("StartWaitCheckOutput", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo lifecycle-test && echo second-line",
		})

		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)
		require.Contains(t, resp.Output, "lifecycle-test")
		require.Contains(t, resp.Output, "second-line")
	})

	t.Run("NonZeroExitCode", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "exit 42",
		})

		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 42, *resp.ExitCode)
	})

	t.Run("StartSignalVerifyExit", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		// Start a long-running background process.
		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command:    "sleep 300",
			Background: true,
		})

		// Verify it's running.
		w := getOutput(t, handler, id)
		require.Equal(t, http.StatusOK, w.Code)
		var running workspacesdk.ProcessOutputResponse
		err := json.NewDecoder(w.Body).Decode(&running)
		require.NoError(t, err)
		require.True(t, running.Running)

		// Signal it.
		sw := postSignal(t, handler, id, workspacesdk.SignalProcessRequest{
			Signal: "kill",
		})
		require.Equal(t, http.StatusOK, sw.Code)

		// Verify it exits.
		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
	})

	t.Run("OutputExceedsBuffer", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		// Generate output that exceeds MaxHeadBytes +
		// MaxTailBytes. Each line is ~100 chars, and we
		// need more than 32KB total (16KB head + 16KB
		// tail).
		lineCount := (agentproc.MaxHeadBytes+agentproc.MaxTailBytes)/50 + 500
		cmd := fmt.Sprintf(
			"for i in $(seq 1 %d); do echo \"line-$i-padding-to-make-this-longer-than-fifty-characters-total\"; done",
			lineCount,
		)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: cmd,
		})

		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)

		// The output should be truncated with head/tail
		// strategy metadata.
		require.NotNil(t, resp.Truncated, "large output should be truncated")
		require.Equal(t, "head_tail", resp.Truncated.Strategy)
		require.Greater(t, resp.Truncated.OmittedBytes, 0)
		require.Greater(t, resp.Truncated.OriginalBytes, resp.Truncated.RetainedBytes)

		// Verify the output contains the omission marker.
		require.Contains(t, resp.Output, "... [omitted")
	})

	t.Run("StderrCaptured", func(t *testing.T) {
		t.Parallel()

		handler := newTestAPI(t)

		id := startAndGetID(t, handler, workspacesdk.StartProcessRequest{
			Command: "echo stdout-msg && echo stderr-msg >&2",
		})

		resp := waitForExit(t, handler, id)
		require.False(t, resp.Running)
		require.NotNil(t, resp.ExitCode)
		require.Equal(t, 0, *resp.ExitCode)
		// Both stdout and stderr should be captured.
		require.Contains(t, resp.Output, "stdout-msg")
		require.Contains(t, resp.Output, "stderr-msg")
	})
}
|
||||
@@ -0,0 +1,309 @@
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
)
|
||||
|
||||
const (
	// MaxHeadBytes is the number of bytes retained from the
	// beginning of the output for LLM consumption.
	MaxHeadBytes = 16 << 10 // 16KB

	// MaxTailBytes is the number of bytes retained from the
	// end of the output for LLM consumption.
	MaxTailBytes = 16 << 10 // 16KB

	// MaxLineLength is the maximum length in bytes of a single line
	// before it is truncated. This prevents minified files
	// or other long single-line output from consuming the
	// entire buffer.
	MaxLineLength = 2048

	// lineTruncationSuffix is appended to lines that exceed
	// MaxLineLength so readers can tell content was dropped.
	lineTruncationSuffix = " ... [truncated]"
)
|
||||
|
||||
// HeadTailBuffer is a thread-safe buffer that captures process
// output and provides head+tail truncation for LLM consumption.
// It implements io.Writer so it can be used directly as
// cmd.Stdout or cmd.Stderr.
//
// The buffer stores up to MaxHeadBytes from the beginning of
// the output and up to MaxTailBytes from the end in a ring
// buffer, keeping total memory usage bounded regardless of
// how much output is written.
type HeadTailBuffer struct {
	mu sync.Mutex // guards all fields below
	// head holds the first maxHead bytes ever written.
	head []byte
	// tail is a fixed-size ring buffer of the most recent bytes
	// written after the head filled; nil until first needed.
	tail []byte
	// tailPos is the next write offset into tail.
	tailPos int
	// tailFull is true once the ring buffer has wrapped at least once.
	tailFull bool
	// headFull is true once head has reached maxHead bytes.
	headFull bool
	// totalBytes counts every byte ever written, stored or not.
	totalBytes int
	// maxHead and maxTail are the configured capacities.
	maxHead int
	maxTail int
}
|
||||
|
||||
// NewHeadTailBuffer creates a new HeadTailBuffer with the
|
||||
// default head and tail sizes.
|
||||
func NewHeadTailBuffer() *HeadTailBuffer {
|
||||
return &HeadTailBuffer{
|
||||
maxHead: MaxHeadBytes,
|
||||
maxTail: MaxTailBytes,
|
||||
}
|
||||
}
|
||||
|
||||
// NewHeadTailBufferSized creates a HeadTailBuffer with custom
|
||||
// head and tail sizes. This is useful for testing truncation
|
||||
// logic with smaller buffers.
|
||||
func NewHeadTailBufferSized(maxHead, maxTail int) *HeadTailBuffer {
|
||||
return &HeadTailBuffer{
|
||||
maxHead: maxHead,
|
||||
maxTail: maxTail,
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements io.Writer. It is safe for concurrent use.
|
||||
// All bytes are accepted; the return value always equals
|
||||
// len(p) with a nil error.
|
||||
func (b *HeadTailBuffer) Write(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
n := len(p)
|
||||
b.totalBytes += n
|
||||
|
||||
// Fill head buffer if it is not yet full.
|
||||
if !b.headFull {
|
||||
remaining := b.maxHead - len(b.head)
|
||||
if remaining > 0 {
|
||||
take := remaining
|
||||
if take > len(p) {
|
||||
take = len(p)
|
||||
}
|
||||
b.head = append(b.head, p[:take]...)
|
||||
p = p[take:]
|
||||
if len(b.head) >= b.maxHead {
|
||||
b.headFull = true
|
||||
}
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Write remaining bytes into the tail ring buffer.
|
||||
b.writeTail(p)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// writeTail appends data to the tail ring buffer. The caller
|
||||
// must hold b.mu.
|
||||
func (b *HeadTailBuffer) writeTail(p []byte) {
|
||||
if b.maxTail <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Lazily allocate the tail buffer on first use.
|
||||
if b.tail == nil {
|
||||
b.tail = make([]byte, b.maxTail)
|
||||
}
|
||||
|
||||
for len(p) > 0 {
|
||||
// Write as many bytes as fit starting at tailPos.
|
||||
space := b.maxTail - b.tailPos
|
||||
take := space
|
||||
if take > len(p) {
|
||||
take = len(p)
|
||||
}
|
||||
copy(b.tail[b.tailPos:b.tailPos+take], p[:take])
|
||||
p = p[take:]
|
||||
b.tailPos += take
|
||||
if b.tailPos >= b.maxTail {
|
||||
b.tailPos = 0
|
||||
b.tailFull = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tailBytes returns the current tail contents in order. The
|
||||
// caller must hold b.mu.
|
||||
func (b *HeadTailBuffer) tailBytes() []byte {
|
||||
if b.tail == nil {
|
||||
return nil
|
||||
}
|
||||
if !b.tailFull {
|
||||
// Haven't wrapped yet; data is [0, tailPos).
|
||||
return b.tail[:b.tailPos]
|
||||
}
|
||||
// Wrapped: data is [tailPos, maxTail) + [0, tailPos).
|
||||
out := make([]byte, b.maxTail)
|
||||
n := copy(out, b.tail[b.tailPos:])
|
||||
copy(out[n:], b.tail[:b.tailPos])
|
||||
return out
|
||||
}
|
||||
|
||||
// Bytes returns a copy of the raw buffer contents. If no
|
||||
// truncation has occurred the full output is returned;
|
||||
// otherwise the head and tail portions are concatenated.
|
||||
func (b *HeadTailBuffer) Bytes() []byte {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
tail := b.tailBytes()
|
||||
if len(tail) == 0 {
|
||||
out := make([]byte, len(b.head))
|
||||
copy(out, b.head)
|
||||
return out
|
||||
}
|
||||
out := make([]byte, len(b.head)+len(tail))
|
||||
copy(out, b.head)
|
||||
copy(out[len(b.head):], tail)
|
||||
return out
|
||||
}
|
||||
|
||||
// Len returns the number of bytes currently stored in the
|
||||
// buffer.
|
||||
func (b *HeadTailBuffer) Len() int {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
tailLen := 0
|
||||
if b.tailFull {
|
||||
tailLen = b.maxTail
|
||||
} else if b.tail != nil {
|
||||
tailLen = b.tailPos
|
||||
}
|
||||
return len(b.head) + tailLen
|
||||
}
|
||||
|
||||
// TotalWritten returns the total number of bytes written to
|
||||
// the buffer, which may exceed the stored capacity.
|
||||
func (b *HeadTailBuffer) TotalWritten() int {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.totalBytes
|
||||
}
|
||||
|
||||
// Output returns the truncated output suitable for LLM
|
||||
// consumption, along with truncation metadata. If the total
|
||||
// output fits within the head buffer alone, the full output is
|
||||
// returned with nil truncation info. Otherwise the head and
|
||||
// tail are joined with an omission marker and long lines are
|
||||
// truncated.
|
||||
func (b *HeadTailBuffer) Output() (string, *workspacesdk.ProcessTruncation) {
|
||||
b.mu.Lock()
|
||||
head := make([]byte, len(b.head))
|
||||
copy(head, b.head)
|
||||
tail := b.tailBytes()
|
||||
total := b.totalBytes
|
||||
headFull := b.headFull
|
||||
b.mu.Unlock()
|
||||
|
||||
storedLen := len(head) + len(tail)
|
||||
|
||||
// If everything fits, no head/tail split is needed.
|
||||
if !headFull || len(tail) == 0 {
|
||||
out := truncateLines(string(head))
|
||||
if total == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// We have both head and tail data, meaning the total
|
||||
// output exceeded the head capacity. Build the
|
||||
// combined output with an omission marker.
|
||||
omitted := total - storedLen
|
||||
headStr := truncateLines(string(head))
|
||||
tailStr := truncateLines(string(tail))
|
||||
|
||||
var sb strings.Builder
|
||||
_, _ = sb.WriteString(headStr)
|
||||
if omitted > 0 {
|
||||
_, _ = sb.WriteString(fmt.Sprintf(
|
||||
"\n\n... [omitted %d bytes] ...\n\n",
|
||||
omitted,
|
||||
))
|
||||
} else {
|
||||
// Head and tail are contiguous but were stored
|
||||
// separately because the head filled up.
|
||||
_, _ = sb.WriteString("\n")
|
||||
}
|
||||
_, _ = sb.WriteString(tailStr)
|
||||
result := sb.String()
|
||||
|
||||
return result, &workspacesdk.ProcessTruncation{
|
||||
OriginalBytes: total,
|
||||
RetainedBytes: len(result),
|
||||
OmittedBytes: omitted,
|
||||
Strategy: "head_tail",
|
||||
}
|
||||
}
|
||||
|
||||
// truncateLines scans the input line by line and truncates
|
||||
// any line longer than MaxLineLength.
|
||||
func truncateLines(s string) string {
|
||||
if len(s) <= MaxLineLength {
|
||||
// Fast path: if the entire string is shorter than
|
||||
// the max line length, no line can exceed it.
|
||||
return s
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(len(s))
|
||||
|
||||
for len(s) > 0 {
|
||||
idx := strings.IndexByte(s, '\n')
|
||||
var line string
|
||||
if idx == -1 {
|
||||
line = s
|
||||
s = ""
|
||||
} else {
|
||||
line = s[:idx]
|
||||
s = s[idx+1:]
|
||||
}
|
||||
|
||||
if len(line) > MaxLineLength {
|
||||
// Truncate preserving the suffix length so the
|
||||
// total does not exceed a reasonable size.
|
||||
cut := MaxLineLength - len(lineTruncationSuffix)
|
||||
if cut < 0 {
|
||||
cut = 0
|
||||
}
|
||||
_, _ = b.WriteString(line[:cut])
|
||||
_, _ = b.WriteString(lineTruncationSuffix)
|
||||
} else {
|
||||
_, _ = b.WriteString(line)
|
||||
}
|
||||
|
||||
// Re-add the newline unless this was the final
|
||||
// segment without a trailing newline.
|
||||
if idx != -1 {
|
||||
_ = b.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Reset clears the buffer, discarding all data.
|
||||
func (b *HeadTailBuffer) Reset() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.head = nil
|
||||
b.tail = nil
|
||||
b.tailPos = 0
|
||||
b.tailFull = false
|
||||
b.headFull = false
|
||||
b.totalBytes = 0
|
||||
}
|
||||
@@ -0,0 +1,338 @@
|
||||
package agentproc_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
)
|
||||
|
||||
func TestHeadTailBuffer_EmptyBuffer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
out, info := buf.Output()
|
||||
require.Empty(t, out)
|
||||
require.Nil(t, info)
|
||||
require.Equal(t, 0, buf.Len())
|
||||
require.Equal(t, 0, buf.TotalWritten())
|
||||
require.Empty(t, buf.Bytes())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_SmallOutput(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
data := "hello world\n"
|
||||
n, err := buf.Write([]byte(data))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(data), n)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.Equal(t, data, out)
|
||||
require.Nil(t, info, "small output should not be truncated")
|
||||
require.Equal(t, len(data), buf.Len())
|
||||
require.Equal(t, len(data), buf.TotalWritten())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_ExactlyHeadSize(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
// Build data that is exactly MaxHeadBytes using short
|
||||
// lines so that line truncation does not apply.
|
||||
line := strings.Repeat("x", 79) + "\n" // 80 bytes per line
|
||||
count := agentproc.MaxHeadBytes / len(line)
|
||||
pad := agentproc.MaxHeadBytes - (count * len(line))
|
||||
data := strings.Repeat(line, count) + strings.Repeat("y", pad)
|
||||
require.Equal(t, agentproc.MaxHeadBytes, len(data),
|
||||
"test data must be exactly MaxHeadBytes")
|
||||
|
||||
n, err := buf.Write([]byte(data))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, agentproc.MaxHeadBytes, n)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.Equal(t, data, out)
|
||||
require.Nil(t, info, "output fitting in head should not be truncated")
|
||||
require.Equal(t, agentproc.MaxHeadBytes, buf.Len())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_HeadPlusTailNoOmission(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Use a small buffer so we can test the boundary where
|
||||
// head fills and tail starts but nothing is omitted.
|
||||
// With maxHead=10, maxTail=10, writing exactly 20 bytes
|
||||
// means head gets 10, tail gets 10, omitted = 0.
|
||||
buf := agentproc.NewHeadTailBufferSized(10, 10)
|
||||
|
||||
data := "0123456789abcdefghij" // 20 bytes
|
||||
n, err := buf.Write([]byte(data))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 20, n)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, 0, info.OmittedBytes)
|
||||
require.Equal(t, "head_tail", info.Strategy)
|
||||
// The output should contain both head and tail.
|
||||
require.Contains(t, out, "0123456789")
|
||||
require.Contains(t, out, "abcdefghij")
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_LargeOutputTruncation(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Use small head/tail so truncation is easy to verify.
|
||||
buf := agentproc.NewHeadTailBufferSized(10, 10)
|
||||
|
||||
// Write 100 bytes: head=10, tail=10, omitted=80.
|
||||
data := strings.Repeat("A", 50) + strings.Repeat("Z", 50)
|
||||
n, err := buf.Write([]byte(data))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 100, n)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, 100, info.OriginalBytes)
|
||||
require.Equal(t, 80, info.OmittedBytes)
|
||||
require.Equal(t, "head_tail", info.Strategy)
|
||||
|
||||
// Head should be first 10 bytes (all A's).
|
||||
require.True(t, strings.HasPrefix(out, "AAAAAAAAAA"))
|
||||
// Tail should be last 10 bytes (all Z's).
|
||||
require.True(t, strings.HasSuffix(out, "ZZZZZZZZZZ"))
|
||||
// Omission marker should be present.
|
||||
require.Contains(t, out, "... [omitted 80 bytes] ...")
|
||||
|
||||
require.Equal(t, 20, buf.Len())
|
||||
require.Equal(t, 100, buf.TotalWritten())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_MultiMBStaysBounded(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
// Write 5MB of data in chunks.
|
||||
chunk := []byte(strings.Repeat("x", 4096) + "\n")
|
||||
totalWritten := 0
|
||||
for totalWritten < 5*1024*1024 {
|
||||
n, err := buf.Write(chunk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(chunk), n)
|
||||
totalWritten += n
|
||||
}
|
||||
|
||||
// Memory should be bounded to head+tail.
|
||||
require.LessOrEqual(t, buf.Len(),
|
||||
agentproc.MaxHeadBytes+agentproc.MaxTailBytes)
|
||||
require.Equal(t, totalWritten, buf.TotalWritten())
|
||||
|
||||
out, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, totalWritten, info.OriginalBytes)
|
||||
require.Greater(t, info.OmittedBytes, 0)
|
||||
require.NotEmpty(t, out)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_LongLineTruncation(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
// Write a line longer than MaxLineLength.
|
||||
longLine := strings.Repeat("m", agentproc.MaxLineLength+500)
|
||||
_, err := buf.Write([]byte(longLine + "\n"))
|
||||
require.NoError(t, err)
|
||||
|
||||
out, _ := buf.Output()
|
||||
lines := strings.Split(strings.TrimRight(out, "\n"), "\n")
|
||||
require.Len(t, lines, 1)
|
||||
require.LessOrEqual(t, len(lines[0]), agentproc.MaxLineLength)
|
||||
require.True(t, strings.HasSuffix(lines[0], "... [truncated]"))
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_LongLineInTail(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Use small buffers so we can force data into the tail.
|
||||
buf := agentproc.NewHeadTailBufferSized(20, 5000)
|
||||
|
||||
// Fill head with short data.
|
||||
_, err := buf.Write([]byte("head data goes here\n"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Now write a very long line into the tail.
|
||||
longLine := strings.Repeat("T", agentproc.MaxLineLength+100)
|
||||
_, err = buf.Write([]byte(longLine + "\n"))
|
||||
require.NoError(t, err)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
// The long line in the tail should be truncated.
|
||||
require.Contains(t, out, "... [truncated]")
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_ConcurrentWrites(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
const goroutines = 10
|
||||
const writes = 1000
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(goroutines)
|
||||
|
||||
for g := range goroutines {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
line := fmt.Sprintf("goroutine-%d: data\n", g)
|
||||
for range writes {
|
||||
_, err := buf.Write([]byte(line))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify totals are consistent.
|
||||
require.Greater(t, buf.TotalWritten(), 0)
|
||||
require.Greater(t, buf.Len(), 0)
|
||||
|
||||
out, _ := buf.Output()
|
||||
require.NotEmpty(t, out)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_TruncationInfoFields(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBufferSized(10, 10)
|
||||
|
||||
// Write enough to cause omission.
|
||||
data := strings.Repeat("D", 50)
|
||||
_, err := buf.Write([]byte(data))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, 50, info.OriginalBytes)
|
||||
require.Equal(t, 30, info.OmittedBytes)
|
||||
require.Equal(t, "head_tail", info.Strategy)
|
||||
// RetainedBytes is the length of the formatted output
|
||||
// string including the omission marker.
|
||||
require.Greater(t, info.RetainedBytes, 0)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_MultipleSmallWrites(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
// Write one byte at a time.
|
||||
expected := "hello world"
|
||||
for i := range len(expected) {
|
||||
n, err := buf.Write([]byte{expected[i]})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, n)
|
||||
}
|
||||
|
||||
out, info := buf.Output()
|
||||
require.Equal(t, expected, out)
|
||||
require.Nil(t, info)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_WriteEmptySlice(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
n, err := buf.Write([]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, n)
|
||||
require.Equal(t, 0, buf.TotalWritten())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_Reset(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
_, err := buf.Write([]byte("some data"))
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, buf.Len(), 0)
|
||||
|
||||
buf.Reset()
|
||||
|
||||
require.Equal(t, 0, buf.Len())
|
||||
require.Equal(t, 0, buf.TotalWritten())
|
||||
out, info := buf.Output()
|
||||
require.Empty(t, out)
|
||||
require.Nil(t, info)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_BytesReturnsCopy(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
_, err := buf.Write([]byte("original"))
|
||||
require.NoError(t, err)
|
||||
|
||||
b := buf.Bytes()
|
||||
require.Equal(t, []byte("original"), b)
|
||||
|
||||
// Mutating the returned slice should not affect the
|
||||
// buffer.
|
||||
b[0] = 'X'
|
||||
require.Equal(t, []byte("original"), buf.Bytes())
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_RingBufferWraparound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Use a tail of 10 bytes and write enough to wrap
|
||||
// around multiple times.
|
||||
buf := agentproc.NewHeadTailBufferSized(5, 10)
|
||||
|
||||
// Fill head (5 bytes).
|
||||
_, err := buf.Write([]byte("HEADD"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write 25 bytes into tail, wrapping 2.5 times.
|
||||
_, err = buf.Write([]byte("0123456789"))
|
||||
require.NoError(t, err)
|
||||
_, err = buf.Write([]byte("abcdefghij"))
|
||||
require.NoError(t, err)
|
||||
_, err = buf.Write([]byte("ABCDE"))
|
||||
require.NoError(t, err)
|
||||
|
||||
out, info := buf.Output()
|
||||
require.NotNil(t, info)
|
||||
// Tail should contain the last 10 bytes: "fghijABCDE".
|
||||
require.True(t, strings.HasSuffix(out, "fghijABCDE"),
|
||||
"expected tail to be last 10 bytes, got: %q", out)
|
||||
}
|
||||
|
||||
func TestHeadTailBuffer_MultipleLinesTruncated(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
buf := agentproc.NewHeadTailBuffer()
|
||||
|
||||
short := "short line\n"
|
||||
long := strings.Repeat("L", agentproc.MaxLineLength+100) + "\n"
|
||||
_, err := buf.Write([]byte(short + long + short))
|
||||
require.NoError(t, err)
|
||||
|
||||
out, _ := buf.Output()
|
||||
lines := strings.Split(strings.TrimRight(out, "\n"), "\n")
|
||||
require.Len(t, lines, 3)
|
||||
require.Equal(t, "short line", lines[0])
|
||||
require.True(t, strings.HasSuffix(lines[1], "... [truncated]"))
|
||||
require.Equal(t, "short line", lines[2])
|
||||
}
|
||||
@@ -0,0 +1,274 @@
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
// Sentinel errors returned by manager.signal so callers can
// distinguish a missing process from one that is no longer
// running. Compare with errors.Is / direct equality.
var (
	errProcessNotFound   = xerrors.New("process not found")
	errProcessNotRunning = xerrors.New("process is not running")
)
|
||||
|
||||
// process represents a running or completed process.
type process struct {
	// mu guards the mutable fields (running, exitCode, exitedAt),
	// which are written by the reaper goroutine in manager.start.
	mu sync.Mutex
	// id uniquely identifies the process within the manager.
	id string
	// command is the shell command line run via "sh -c".
	command string
	// workDir is the working directory; empty means it is not set
	// on the command.
	workDir string
	// background only affects client-side polling behavior; it
	// does not change the process lifetime.
	background bool
	cmd        *exec.Cmd
	// cancel terminates the process's context, killing it.
	cancel context.CancelFunc
	// buf captures combined stdout and stderr.
	buf     *HeadTailBuffer
	running bool
	// exitCode is nil until the process has exited.
	exitCode *int
	// startedAt and exitedAt are Unix-second timestamps;
	// exitedAt is nil while the process is running.
	startedAt int64
	exitedAt  *int64
	done      chan struct{} // closed when process exits
}
|
||||
|
||||
// info returns a snapshot of the process state.
|
||||
func (p *process) info() workspacesdk.ProcessInfo {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
return workspacesdk.ProcessInfo{
|
||||
ID: p.id,
|
||||
Command: p.command,
|
||||
WorkDir: p.workDir,
|
||||
Background: p.background,
|
||||
Running: p.running,
|
||||
ExitCode: p.exitCode,
|
||||
StartedAt: p.startedAt,
|
||||
ExitedAt: p.exitedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// output returns the truncated output from the process buffer
|
||||
// along with optional truncation metadata.
|
||||
func (p *process) output() (string, *workspacesdk.ProcessTruncation) {
|
||||
return p.buf.Output()
|
||||
}
|
||||
|
||||
// manager tracks processes spawned by the agent.
type manager struct {
	// mu guards procs and closed.
	mu     sync.Mutex
	logger slog.Logger
	// execer spawns the commands run by start.
	execer agentexec.Execer
	// clock supplies timestamps; newManager installs the real
	// clock, tests can substitute a fake one.
	clock quartz.Clock
	// procs holds every tracked process keyed by its ID.
	procs map[string]*process
	// closed, once true, makes start reject new processes.
	closed bool
}
|
||||
|
||||
// newManager creates a new process manager.
|
||||
func newManager(logger slog.Logger, execer agentexec.Execer) *manager {
|
||||
return &manager{
|
||||
logger: logger,
|
||||
execer: execer,
|
||||
clock: quartz.NewReal(),
|
||||
procs: make(map[string]*process),
|
||||
}
|
||||
}
|
||||
|
||||
// start spawns a new process. Both foreground and background
// processes use a long-lived context so the process survives
// the HTTP request lifecycle. The background flag only affects
// client-side polling behavior.
func (m *manager) start(req workspacesdk.StartProcessRequest) (*process, error) {
	// Fail fast if the manager is already closed. This check is
	// advisory only: Close may still race with the spawn below,
	// which is why closed is re-checked after cmd.Start.
	m.mu.Lock()
	if m.closed {
		m.mu.Unlock()
		return nil, xerrors.New("manager is closed")
	}
	m.mu.Unlock()

	id := uuid.New().String()

	// Use a cancellable context so Close() can terminate
	// all processes. context.Background() is the parent so
	// the process is not tied to any HTTP request.
	ctx, cancel := context.WithCancel(context.Background())
	cmd := m.execer.CommandContext(ctx, "sh", "-c", req.Command)
	if req.WorkDir != "" {
		cmd.Dir = req.WorkDir
	}
	// Spawned processes get no interactive input.
	cmd.Stdin = nil

	// WaitDelay ensures cmd.Wait returns promptly after
	// the process is killed, even if child processes are
	// still holding the stdout/stderr pipes open.
	cmd.WaitDelay = 5 * time.Second

	// Combined stdout/stderr is captured in a head+tail buffer.
	buf := NewHeadTailBuffer()
	cmd.Stdout = buf
	cmd.Stderr = buf

	// Requested environment variables are appended to the
	// agent's own environment rather than replacing it.
	if len(req.Env) > 0 {
		cmd.Env = os.Environ()
		for k, v := range req.Env {
			cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
		}
	}

	if err := cmd.Start(); err != nil {
		// Release the context to avoid leaking it.
		cancel()
		return nil, xerrors.Errorf("start process: %w", err)
	}

	now := m.clock.Now().Unix()
	proc := &process{
		id:         id,
		command:    req.Command,
		workDir:    req.WorkDir,
		background: req.Background,
		cmd:        cmd,
		cancel:     cancel,
		buf:        buf,
		running:    true,
		startedAt:  now,
		done:       make(chan struct{}),
	}

	m.mu.Lock()
	if m.closed {
		m.mu.Unlock()
		// Manager closed between our check and now. Kill the
		// process we just started.
		cancel()
		_ = cmd.Wait()
		return nil, xerrors.New("manager is closed")
	}
	m.procs[id] = proc
	m.mu.Unlock()

	// Reaper goroutine: waits for the process to exit, records
	// its exit status under proc.mu, then signals completion by
	// closing proc.done (which Close waits on).
	go func() {
		err := cmd.Wait()
		exitedAt := m.clock.Now().Unix()

		proc.mu.Lock()
		proc.running = false
		proc.exitedAt = &exitedAt
		code := 0
		if err != nil {
			// Extract the exit code from the error.
			var exitErr *exec.ExitError
			if xerrors.As(err, &exitErr) {
				code = exitErr.ExitCode()
			} else {
				// Unknown error; use -1 as a sentinel.
				code = -1
				m.logger.Warn(
					context.Background(),
					"process wait returned non-exit error",
					slog.F("id", id),
					slog.Error(err),
				)
			}
		}
		proc.exitCode = &code
		proc.mu.Unlock()

		// Closed outside the lock so waiters never observe a
		// partially updated process.
		close(proc.done)
	}()

	return proc, nil
}
|
||||
|
||||
// get returns a process by ID.
|
||||
func (m *manager) get(id string) (*process, bool) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
proc, ok := m.procs[id]
|
||||
return proc, ok
|
||||
}
|
||||
|
||||
// list returns info about all tracked processes.
|
||||
func (m *manager) list() []workspacesdk.ProcessInfo {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
infos := make([]workspacesdk.ProcessInfo, 0, len(m.procs))
|
||||
for _, proc := range m.procs {
|
||||
infos = append(infos, proc.info())
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
// signal sends a signal to a running process. It returns
|
||||
// sentinel errors errProcessNotFound and errProcessNotRunning
|
||||
// so callers can distinguish failure modes.
|
||||
func (m *manager) signal(id string, sig string) error {
|
||||
m.mu.Lock()
|
||||
proc, ok := m.procs[id]
|
||||
m.mu.Unlock()
|
||||
|
||||
if !ok {
|
||||
return errProcessNotFound
|
||||
}
|
||||
|
||||
proc.mu.Lock()
|
||||
defer proc.mu.Unlock()
|
||||
|
||||
if !proc.running {
|
||||
return errProcessNotRunning
|
||||
}
|
||||
|
||||
switch sig {
|
||||
case "kill":
|
||||
if err := proc.cmd.Process.Kill(); err != nil {
|
||||
return xerrors.Errorf("kill process: %w", err)
|
||||
}
|
||||
case "terminate":
|
||||
//nolint:revive // syscall.SIGTERM is portable enough
|
||||
// for our supported platforms.
|
||||
if err := proc.cmd.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
return xerrors.Errorf("terminate process: %w", err)
|
||||
}
|
||||
default:
|
||||
return xerrors.Errorf("unsupported signal %q", sig)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close kills all running processes and prevents new ones from
// starting. It cancels each process's context, which causes
// CommandContext to kill the process and its pipe goroutines to
// drain.
func (m *manager) Close() error {
	m.mu.Lock()
	// Idempotent: only the first call performs the shutdown.
	if m.closed {
		m.mu.Unlock()
		return nil
	}
	m.closed = true
	// Snapshot the process list so the manager lock is not held
	// while cancelling and waiting below.
	procs := make([]*process, 0, len(m.procs))
	for _, p := range m.procs {
		procs = append(procs, p)
	}
	m.mu.Unlock()

	// Cancel everything first so all processes shut down in
	// parallel rather than one at a time.
	for _, p := range procs {
		p.cancel()
	}

	// Wait for all processes to exit. Each done channel is
	// closed by the per-process reaper goroutine in start once
	// cmd.Wait returns.
	for _, p := range procs {
		<-p.done
	}

	return nil
}
|
||||
@@ -1,37 +1,22 @@
|
||||
package agentsocket_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agentsocket"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/tailnettest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestServer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("agentsocket is not supported on Windows")
|
||||
}
|
||||
|
||||
t.Run("StartStop", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(t.TempDir(), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
logger := slog.Make().Leveled(slog.LevelDebug)
|
||||
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
|
||||
require.NoError(t, err)
|
||||
@@ -41,7 +26,7 @@ func TestServer(t *testing.T) {
|
||||
t.Run("AlreadyStarted", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(t.TempDir(), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
logger := slog.Make().Leveled(slog.LevelDebug)
|
||||
server1, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
|
||||
require.NoError(t, err)
|
||||
@@ -49,90 +34,4 @@ func TestServer(t *testing.T) {
|
||||
_, err = agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
|
||||
require.ErrorContains(t, err, "create socket")
|
||||
})
|
||||
|
||||
t.Run("AutoSocketPath", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(t.TempDir(), "test.sock")
|
||||
logger := slog.Make().Leveled(slog.LevelDebug)
|
||||
server, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, server.Close())
|
||||
})
|
||||
}
|
||||
|
||||
func TestServerWindowsNotSupported(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("this test only runs on Windows")
|
||||
}
|
||||
|
||||
t.Run("NewServer", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(t.TempDir(), "test.sock")
|
||||
logger := slog.Make().Leveled(slog.LevelDebug)
|
||||
_, err := agentsocket.NewServer(logger, agentsocket.WithPath(socketPath))
|
||||
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
|
||||
})
|
||||
|
||||
t.Run("NewClient", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, err := agentsocket.NewClient(context.Background(), agentsocket.WithPath("test.sock"))
|
||||
require.ErrorContains(t, err, "agentsocket is not supported on Windows")
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgentInitializesOnWindowsWithoutSocketServer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("this test only runs on Windows")
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
logger := testutil.Logger(t).Named("agent")
|
||||
|
||||
derpMap, _ := tailnettest.RunDERPAndSTUN(t)
|
||||
|
||||
coordinator := tailnet.NewCoordinator(logger)
|
||||
t.Cleanup(func() {
|
||||
_ = coordinator.Close()
|
||||
})
|
||||
|
||||
statsCh := make(chan *agentproto.Stats, 50)
|
||||
agentID := uuid.New()
|
||||
manifest := agentsdk.Manifest{
|
||||
AgentID: agentID,
|
||||
AgentName: "test-agent",
|
||||
WorkspaceName: "test-workspace",
|
||||
OwnerName: "test-user",
|
||||
WorkspaceID: uuid.New(),
|
||||
DERPMap: derpMap,
|
||||
}
|
||||
|
||||
client := agenttest.NewClient(t, logger.Named("agenttest"), agentID, manifest, statsCh, coordinator)
|
||||
t.Cleanup(client.Close)
|
||||
|
||||
options := agent.Options{
|
||||
Client: client,
|
||||
Filesystem: afero.NewMemMapFs(),
|
||||
Logger: logger.Named("agent"),
|
||||
ReconnectingPTYTimeout: testutil.WaitShort,
|
||||
EnvironmentVariables: map[string]string{},
|
||||
SocketPath: "",
|
||||
}
|
||||
|
||||
agnt := agent.New(options)
|
||||
t.Cleanup(func() {
|
||||
_ = agnt.Close()
|
||||
})
|
||||
|
||||
startup := testutil.TryReceive(ctx, t, client.GetStartup())
|
||||
require.NotNil(t, startup, "agent should send startup message")
|
||||
|
||||
err := agnt.Close()
|
||||
require.NoError(t, err, "agent should close cleanly")
|
||||
}
|
||||
|
||||
@@ -2,8 +2,6 @@ package agentsocket_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -30,14 +28,10 @@ func newSocketClient(ctx context.Context, t *testing.T, socketPath string) *agen
|
||||
func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("agentsocket is not supported on Windows")
|
||||
}
|
||||
|
||||
t.Run("Ping", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -57,7 +51,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
|
||||
t.Run("NewUnit", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -79,7 +73,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnitAlreadyStarted", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -109,7 +103,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnitAlreadyCompleted", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -148,7 +142,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnitNotReady", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -178,7 +172,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("NewUnits", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -203,7 +197,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("DependencyAlreadyRegistered", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -238,7 +232,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("DependencyAddedAfterDependentStarted", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -280,7 +274,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnregisteredUnit", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -299,7 +293,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnitNotReady", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
@@ -323,7 +317,7 @@ func TestDRPCAgentSocketService(t *testing.T) {
|
||||
t.Run("UnitReady", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
|
||||
@@ -4,19 +4,60 @@ package agentsocket
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/user"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func createSocket(_ string) (net.Listener, error) {
|
||||
return nil, xerrors.New("agentsocket is not supported on Windows")
|
||||
const defaultSocketPath = `\\.\pipe\com.coder.agentsocket`
|
||||
|
||||
func createSocket(path string) (net.Listener, error) {
|
||||
if path == "" {
|
||||
path = defaultSocketPath
|
||||
}
|
||||
if !strings.HasPrefix(path, `\\.\pipe\`) {
|
||||
return nil, xerrors.Errorf("%q is not a valid local socket path", path)
|
||||
}
|
||||
|
||||
user, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to look up current user: %w", err)
|
||||
}
|
||||
sid := user.Uid
|
||||
|
||||
// SecurityDescriptor is in SDDL format. c.f.
|
||||
// https://learn.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format for full details.
|
||||
// D: indicates this is a Discretionary Access Control List (DACL), which is Windows-speak for ACLs that allow or
|
||||
// deny access (as opposed to SACL which controls audit logging).
|
||||
// P indicates that this DACL is "protected" from being modified thru inheritance
|
||||
// () delimit access control entries (ACEs), here we only have one, which, allows (A) generic all (GA) access to our
|
||||
// specific user's security ID (SID).
|
||||
//
|
||||
// Note that although Microsoft docs at https://learn.microsoft.com/en-us/windows/win32/ipc/named-pipes warns that
|
||||
// named pipes are accessible from remote machines in the general case, the `winio` package sets the flag
|
||||
// windows.FILE_PIPE_REJECT_REMOTE_CLIENTS when creating pipes, so connections from remote machines are always
|
||||
// denied. This is important because we sort of expect customers to run the Coder agent under a generic user
|
||||
// account unless they are very sophisticated. We don't want this socket to cross the boundary of the local machine.
|
||||
configuration := &winio.PipeConfig{
|
||||
SecurityDescriptor: fmt.Sprintf("D:P(A;;GA;;;%s)", sid),
|
||||
}
|
||||
|
||||
listener, err := winio.ListenPipe(path, configuration)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to open named pipe: %w", err)
|
||||
}
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
func cleanupSocket(_ string) error {
|
||||
return nil
|
||||
func cleanupSocket(path string) error {
|
||||
return os.Remove(path)
|
||||
}
|
||||
|
||||
func dialSocket(_ context.Context, _ string) (net.Conn, error) {
|
||||
return nil, xerrors.New("agentsocket is not supported on Windows")
|
||||
func dialSocket(ctx context.Context, path string) (net.Conn, error) {
|
||||
return winio.DialPipeContext(ctx, path)
|
||||
}
|
||||
|
||||
@@ -124,6 +124,12 @@ func (c *Client) Close() {
|
||||
c.derpMapOnce.Do(func() { close(c.derpMapUpdates) })
|
||||
}
|
||||
|
||||
func (c *Client) ConnectRPC28WithRole(ctx context.Context, _ string) (
|
||||
agentproto.DRPCAgentClient28, proto.DRPCTailnetClient28, error,
|
||||
) {
|
||||
return c.ConnectRPC28(ctx)
|
||||
}
|
||||
|
||||
func (c *Client) ConnectRPC28(ctx context.Context) (
|
||||
agentproto.DRPCAgentClient28, proto.DRPCTailnetClient28, error,
|
||||
) {
|
||||
@@ -229,6 +235,10 @@ type FakeAgentAPI struct {
|
||||
pushResourcesMonitoringUsageFunc func(*agentproto.PushResourcesMonitoringUsageRequest) (*agentproto.PushResourcesMonitoringUsageResponse, error)
|
||||
}
|
||||
|
||||
func (*FakeAgentAPI) UpdateAppStatus(context.Context, *agentproto.UpdateAppStatusRequest) (*agentproto.UpdateAppStatusResponse, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) GetManifest(context.Context, *agentproto.GetManifestRequest) (*agentproto.Manifest, error) {
|
||||
return f.manifest, nil
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ func (a *agent) apiHandler() http.Handler {
|
||||
})
|
||||
|
||||
r.Mount("/api/v0", a.filesAPI.Routes())
|
||||
r.Mount("/api/v0/processes", a.processAPI.Routes())
|
||||
|
||||
if a.devcontainers {
|
||||
r.Mount("/api/v0/containers", a.containerAPI.Routes())
|
||||
|
||||
+543
-329
File diff suppressed because it is too large
Load Diff
+20
-1
@@ -436,7 +436,7 @@ message CreateSubAgentRequest {
|
||||
}
|
||||
|
||||
repeated DisplayApp display_apps = 6;
|
||||
|
||||
|
||||
optional bytes id = 7;
|
||||
}
|
||||
|
||||
@@ -494,6 +494,24 @@ message ReportBoundaryLogsRequest {
|
||||
|
||||
message ReportBoundaryLogsResponse {}
|
||||
|
||||
// UpdateAppStatusRequest updates the given Workspace App's status. c.f. agentsdk.PatchAppStatus
|
||||
message UpdateAppStatusRequest {
|
||||
string slug = 1;
|
||||
|
||||
enum AppStatusState {
|
||||
WORKING = 0;
|
||||
IDLE = 1;
|
||||
COMPLETE = 2;
|
||||
FAILURE = 3;
|
||||
}
|
||||
AppStatusState state = 2;
|
||||
|
||||
string message = 3;
|
||||
string uri = 4;
|
||||
}
|
||||
|
||||
message UpdateAppStatusResponse {}
|
||||
|
||||
service Agent {
|
||||
rpc GetManifest(GetManifestRequest) returns (Manifest);
|
||||
rpc GetServiceBanner(GetServiceBannerRequest) returns (ServiceBanner);
|
||||
@@ -512,4 +530,5 @@ service Agent {
|
||||
rpc DeleteSubAgent(DeleteSubAgentRequest) returns (DeleteSubAgentResponse);
|
||||
rpc ListSubAgents(ListSubAgentsRequest) returns (ListSubAgentsResponse);
|
||||
rpc ReportBoundaryLogs(ReportBoundaryLogsRequest) returns (ReportBoundaryLogsResponse);
|
||||
rpc UpdateAppStatus(UpdateAppStatusRequest) returns (UpdateAppStatusResponse);
|
||||
}
|
||||
|
||||
@@ -56,6 +56,7 @@ type DRPCAgentClient interface {
|
||||
DeleteSubAgent(ctx context.Context, in *DeleteSubAgentRequest) (*DeleteSubAgentResponse, error)
|
||||
ListSubAgents(ctx context.Context, in *ListSubAgentsRequest) (*ListSubAgentsResponse, error)
|
||||
ReportBoundaryLogs(ctx context.Context, in *ReportBoundaryLogsRequest) (*ReportBoundaryLogsResponse, error)
|
||||
UpdateAppStatus(ctx context.Context, in *UpdateAppStatusRequest) (*UpdateAppStatusResponse, error)
|
||||
}
|
||||
|
||||
type drpcAgentClient struct {
|
||||
@@ -221,6 +222,15 @@ func (c *drpcAgentClient) ReportBoundaryLogs(ctx context.Context, in *ReportBoun
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateAppStatus(ctx context.Context, in *UpdateAppStatusRequest) (*UpdateAppStatusResponse, error) {
|
||||
out := new(UpdateAppStatusResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateAppStatus", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type DRPCAgentServer interface {
|
||||
GetManifest(context.Context, *GetManifestRequest) (*Manifest, error)
|
||||
GetServiceBanner(context.Context, *GetServiceBannerRequest) (*ServiceBanner, error)
|
||||
@@ -239,6 +249,7 @@ type DRPCAgentServer interface {
|
||||
DeleteSubAgent(context.Context, *DeleteSubAgentRequest) (*DeleteSubAgentResponse, error)
|
||||
ListSubAgents(context.Context, *ListSubAgentsRequest) (*ListSubAgentsResponse, error)
|
||||
ReportBoundaryLogs(context.Context, *ReportBoundaryLogsRequest) (*ReportBoundaryLogsResponse, error)
|
||||
UpdateAppStatus(context.Context, *UpdateAppStatusRequest) (*UpdateAppStatusResponse, error)
|
||||
}
|
||||
|
||||
type DRPCAgentUnimplementedServer struct{}
|
||||
@@ -311,9 +322,13 @@ func (s *DRPCAgentUnimplementedServer) ReportBoundaryLogs(context.Context, *Repo
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateAppStatus(context.Context, *UpdateAppStatusRequest) (*UpdateAppStatusResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
type DRPCAgentDescription struct{}
|
||||
|
||||
func (DRPCAgentDescription) NumMethods() int { return 17 }
|
||||
func (DRPCAgentDescription) NumMethods() int { return 18 }
|
||||
|
||||
func (DRPCAgentDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
|
||||
switch n {
|
||||
@@ -470,6 +485,15 @@ func (DRPCAgentDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver,
|
||||
in1.(*ReportBoundaryLogsRequest),
|
||||
)
|
||||
}, DRPCAgentServer.ReportBoundaryLogs, true
|
||||
case 17:
|
||||
return "/coder.agent.v2.Agent/UpdateAppStatus", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
UpdateAppStatus(
|
||||
ctx,
|
||||
in1.(*UpdateAppStatusRequest),
|
||||
)
|
||||
}, DRPCAgentServer.UpdateAppStatus, true
|
||||
default:
|
||||
return "", nil, nil, nil, false
|
||||
}
|
||||
@@ -750,3 +774,19 @@ func (x *drpcAgent_ReportBoundaryLogsStream) SendAndClose(m *ReportBoundaryLogsR
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_UpdateAppStatusStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*UpdateAppStatusResponse) error
|
||||
}
|
||||
|
||||
type drpcAgent_UpdateAppStatusStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_UpdateAppStatusStream) SendAndClose(m *UpdateAppStatusResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
@@ -73,9 +73,13 @@ type DRPCAgentClient27 interface {
|
||||
ReportBoundaryLogs(ctx context.Context, in *ReportBoundaryLogsRequest) (*ReportBoundaryLogsResponse, error)
|
||||
}
|
||||
|
||||
// DRPCAgentClient28 is the Agent API at v2.8. It adds a SubagentId field to the
|
||||
// WorkspaceAgentDevcontainer message, and a Id field to the CreateSubAgentRequest
|
||||
// message. Compatible with Coder v2.31+
|
||||
// DRPCAgentClient28 is the Agent API at v2.8. It adds
|
||||
// - a SubagentId field to the WorkspaceAgentDevcontainer message
|
||||
// - an Id field to the CreateSubAgentRequest message.
|
||||
// - UpdateAppStatus RPC.
|
||||
//
|
||||
// Compatible with Coder v2.31+
|
||||
type DRPCAgentClient28 interface {
|
||||
DRPCAgentClient27
|
||||
UpdateAppStatus(ctx context.Context, in *UpdateAppStatusRequest) (*UpdateAppStatusResponse, error)
|
||||
}
|
||||
|
||||
@@ -30,9 +30,15 @@ func RichParameter(inv *serpent.Invocation, templateVersionParameter codersdk.Te
|
||||
_, _ = fmt.Fprint(inv.Stdout, "\033[1A")
|
||||
|
||||
var defaults []string
|
||||
err = json.Unmarshal([]byte(templateVersionParameter.DefaultValue), &defaults)
|
||||
if err != nil {
|
||||
return "", err
|
||||
defaultSource := defaultValue
|
||||
if defaultSource == "" {
|
||||
defaultSource = templateVersionParameter.DefaultValue
|
||||
}
|
||||
if defaultSource != "" {
|
||||
err = json.Unmarshal([]byte(defaultSource), &defaults)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
values, err := RichMultiSelect(inv, RichMultiSelectOptions{
|
||||
|
||||
+50
-45
@@ -10,6 +10,7 @@ import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
"github.com/mark3labs/mcp-go/server"
|
||||
@@ -23,6 +24,7 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/codersdk/toolsdk"
|
||||
"github.com/coder/retry"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
@@ -539,7 +541,6 @@ func (r *RootCmd) mcpServer() *serpent.Command {
|
||||
defer cancel()
|
||||
defer srv.queue.Close()
|
||||
|
||||
cliui.Infof(inv.Stderr, "Failed to watch screen events")
|
||||
// Start the reporter, watcher, and server. These are all tied to the
|
||||
// lifetime of the MCP server, which is itself tied to the lifetime of the
|
||||
// AI agent.
|
||||
@@ -613,48 +614,51 @@ func (s *mcpServer) startReporter(ctx context.Context, inv *serpent.Invocation)
|
||||
}
|
||||
|
||||
func (s *mcpServer) startWatcher(ctx context.Context, inv *serpent.Invocation) {
|
||||
eventsCh, errCh, err := s.aiAgentAPIClient.SubscribeEvents(ctx)
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to watch screen events: %s", err)
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case event := <-eventsCh:
|
||||
switch ev := event.(type) {
|
||||
case agentapi.EventStatusChange:
|
||||
// If the screen is stable, report idle.
|
||||
state := codersdk.WorkspaceAppStatusStateWorking
|
||||
if ev.Status == agentapi.StatusStable {
|
||||
state = codersdk.WorkspaceAppStatusStateIdle
|
||||
}
|
||||
err := s.queue.Push(taskReport{
|
||||
state: state,
|
||||
})
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to queue update: %s", err)
|
||||
for retrier := retry.New(time.Second, 30*time.Second); retrier.Wait(ctx); {
|
||||
eventsCh, errCh, err := s.aiAgentAPIClient.SubscribeEvents(ctx)
|
||||
if err == nil {
|
||||
retrier.Reset()
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
case agentapi.EventMessageUpdate:
|
||||
if ev.Role == agentapi.RoleUser {
|
||||
err := s.queue.Push(taskReport{
|
||||
messageID: &ev.Id,
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
})
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to queue update: %s", err)
|
||||
return
|
||||
case event := <-eventsCh:
|
||||
switch ev := event.(type) {
|
||||
case agentapi.EventStatusChange:
|
||||
state := codersdk.WorkspaceAppStatusStateWorking
|
||||
if ev.Status == agentapi.StatusStable {
|
||||
state = codersdk.WorkspaceAppStatusStateIdle
|
||||
}
|
||||
err := s.queue.Push(taskReport{
|
||||
state: state,
|
||||
})
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to queue update: %s", err)
|
||||
return
|
||||
}
|
||||
case agentapi.EventMessageUpdate:
|
||||
if ev.Role == agentapi.RoleUser {
|
||||
err := s.queue.Push(taskReport{
|
||||
messageID: &ev.Id,
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
})
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to queue update: %s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
case err := <-errCh:
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
cliui.Warnf(inv.Stderr, "Received error from screen event watcher: %s", err)
|
||||
}
|
||||
break loop
|
||||
}
|
||||
}
|
||||
case err := <-errCh:
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
cliui.Warnf(inv.Stderr, "Received error from screen event watcher: %s", err)
|
||||
}
|
||||
return
|
||||
} else {
|
||||
cliui.Warnf(inv.Stderr, "Failed to watch screen events: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -692,13 +696,14 @@ func (s *mcpServer) startServer(ctx context.Context, inv *serpent.Invocation, in
|
||||
// Add tool dependencies.
|
||||
toolOpts := []func(*toolsdk.Deps){
|
||||
toolsdk.WithTaskReporter(func(args toolsdk.ReportTaskArgs) error {
|
||||
// The agent does not reliably report its status correctly. If AgentAPI
|
||||
// is enabled, we will always set the status to "working" when we get an
|
||||
// MCP message, and rely on the screen watcher to eventually catch the
|
||||
// idle state.
|
||||
state := codersdk.WorkspaceAppStatusStateWorking
|
||||
if s.aiAgentAPIClient == nil {
|
||||
state = codersdk.WorkspaceAppStatusState(args.State)
|
||||
state := codersdk.WorkspaceAppStatusState(args.State)
|
||||
// The agent does not reliably report idle, so when AgentAPI is
|
||||
// enabled we override idle to working and let the screen watcher
|
||||
// detect the real idle via StatusStable. Final states (failure,
|
||||
// complete) are trusted from the agent since the screen watcher
|
||||
// cannot produce them.
|
||||
if s.aiAgentAPIClient != nil && state == codersdk.WorkspaceAppStatusStateIdle {
|
||||
state = codersdk.WorkspaceAppStatusStateWorking
|
||||
}
|
||||
return s.queue.Push(taskReport{
|
||||
link: args.Link,
|
||||
|
||||
+185
-1
@@ -921,7 +921,7 @@ func TestExpMcpReporter(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// We ignore the state from the agent and assume "working".
|
||||
// We override idle from the agent to working, but trust final states.
|
||||
{
|
||||
name: "IgnoreAgentState",
|
||||
// AI agent reports that it is finished but the summary says it is doing
|
||||
@@ -953,6 +953,46 @@ func TestExpMcpReporter(t *testing.T) {
|
||||
Message: "finished",
|
||||
},
|
||||
},
|
||||
// Agent reports failure; trusted even with AgentAPI enabled.
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateFailure,
|
||||
summary: "something broke",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateFailure,
|
||||
Message: "something broke",
|
||||
},
|
||||
},
|
||||
// After failure, watcher reports stable -> idle.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "something broke",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Final states pass through with AgentAPI enabled.
|
||||
{
|
||||
name: "AllowFinalStates",
|
||||
tests: []test{
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
summary: "doing work",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
},
|
||||
},
|
||||
// Agent reports complete; not overridden.
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateComplete,
|
||||
summary: "all done",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateComplete,
|
||||
Message: "all done",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// When AgentAPI is not being used, we accept agent state updates as-is.
|
||||
@@ -1110,4 +1150,148 @@ func TestExpMcpReporter(t *testing.T) {
|
||||
<-cmdDone
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("Reconnect", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a test deployment and workspace.
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
client, user2 := coderdtest.CreateAnotherUser(t, client, user.OrganizationID)
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user2.ID,
|
||||
}).WithAgent(func(a []*proto.Agent) []*proto.Agent {
|
||||
a[0].Apps = []*proto.App{
|
||||
{
|
||||
Slug: "vscode",
|
||||
},
|
||||
}
|
||||
return a
|
||||
}).Do()
|
||||
|
||||
ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitLong))
|
||||
|
||||
// Watch the workspace for changes.
|
||||
watcher, err := client.WatchWorkspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
var lastAppStatus codersdk.WorkspaceAppStatus
|
||||
nextUpdate := func() codersdk.WorkspaceAppStatus {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
require.FailNow(t, "timed out waiting for status update")
|
||||
case w, ok := <-watcher:
|
||||
require.True(t, ok, "watch channel closed")
|
||||
if w.LatestAppStatus != nil && w.LatestAppStatus.ID != lastAppStatus.ID {
|
||||
t.Logf("Got status update: %s > %s", lastAppStatus.State, w.LatestAppStatus.State)
|
||||
lastAppStatus = *w.LatestAppStatus
|
||||
return lastAppStatus
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mock AI AgentAPI server that supports disconnect/reconnect.
|
||||
disconnect := make(chan struct{})
|
||||
listening := make(chan func(sse codersdk.ServerSentEvent) error)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Create a cancelable context so we can stop the SSE sender
|
||||
// goroutine on disconnect without waiting for the HTTP
|
||||
// serve loop to cancel r.Context().
|
||||
sseCtx, sseCancel := context.WithCancel(r.Context())
|
||||
defer sseCancel()
|
||||
r = r.WithContext(sseCtx)
|
||||
|
||||
send, closed, err := httpapi.ServerSentEventSender(w, r)
|
||||
if err != nil {
|
||||
httpapi.Write(sseCtx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error setting up server-sent events.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Send initial message so the watcher knows the agent is active.
|
||||
send(*makeMessageEvent(0, agentapi.RoleAgent))
|
||||
select {
|
||||
case listening <- send:
|
||||
case <-r.Context().Done():
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-closed:
|
||||
case <-disconnect:
|
||||
sseCancel()
|
||||
<-closed
|
||||
}
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
inv, _ := clitest.New(t,
|
||||
"exp", "mcp", "server",
|
||||
"--agent-url", client.URL.String(),
|
||||
"--agent-token", r.AgentToken,
|
||||
"--app-status-slug", "vscode",
|
||||
"--allowed-tools=coder_report_task",
|
||||
"--ai-agentapi-url", srv.URL,
|
||||
)
|
||||
inv = inv.WithContext(ctx)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
stderr := ptytest.New(t)
|
||||
inv.Stderr = stderr.Output()
|
||||
|
||||
// Run the MCP server.
|
||||
clitest.Start(t, inv)
|
||||
|
||||
// Initialize.
|
||||
payload := `{"jsonrpc":"2.0","id":1,"method":"initialize"}`
|
||||
pty.WriteLine(payload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
_ = pty.ReadLine(ctx) // ignore init response
|
||||
|
||||
// Get first sender from the initial SSE connection.
|
||||
sender := testutil.RequireReceive(ctx, t, listening)
|
||||
|
||||
// Self-report a working status via tool call.
|
||||
toolPayload := `{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"coder_report_task","arguments":{"state":"working","summary":"doing work","link":""}}}`
|
||||
pty.WriteLine(toolPayload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
_ = pty.ReadLine(ctx) // ignore response
|
||||
got := nextUpdate()
|
||||
require.Equal(t, codersdk.WorkspaceAppStatusStateWorking, got.State)
|
||||
require.Equal(t, "doing work", got.Message)
|
||||
|
||||
// Watcher sends stable, verify idle is reported.
|
||||
err = sender(*makeStatusEvent(agentapi.StatusStable))
|
||||
require.NoError(t, err)
|
||||
got = nextUpdate()
|
||||
require.Equal(t, codersdk.WorkspaceAppStatusStateIdle, got.State)
|
||||
|
||||
// Disconnect the SSE connection by signaling the handler to return.
|
||||
testutil.RequireSend(ctx, t, disconnect, struct{}{})
|
||||
|
||||
// Wait for the watcher to reconnect and get the new sender.
|
||||
sender = testutil.RequireReceive(ctx, t, listening)
|
||||
|
||||
// After reconnect, self-report a working status again.
|
||||
toolPayload = `{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"coder_report_task","arguments":{"state":"working","summary":"reconnected","link":""}}}`
|
||||
pty.WriteLine(toolPayload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
_ = pty.ReadLine(ctx) // ignore response
|
||||
got = nextUpdate()
|
||||
require.Equal(t, codersdk.WorkspaceAppStatusStateWorking, got.State)
|
||||
require.Equal(t, "reconnected", got.Message)
|
||||
|
||||
// Verify the watcher still processes events after reconnect.
|
||||
err = sender(*makeStatusEvent(agentapi.StatusStable))
|
||||
require.NoError(t, err)
|
||||
got = nextUpdate()
|
||||
require.Equal(t, codersdk.WorkspaceAppStatusStateIdle, got.State)
|
||||
|
||||
cancel()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
|
||||
templateVersionJobTimeout time.Duration
|
||||
prebuildWorkspaceTimeout time.Duration
|
||||
noCleanup bool
|
||||
provisionerTags []string
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
timeoutStrategy = &timeoutFlags{}
|
||||
@@ -111,10 +112,16 @@ func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
|
||||
|
||||
th := harness.NewTestHarness(timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}), cleanupStrategy.toStrategy())
|
||||
|
||||
tags, err := ParseProvisionerTags(provisionerTags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range numTemplates {
|
||||
id := strconv.Itoa(int(i))
|
||||
cfg := prebuilds.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
ProvisionerTags: tags,
|
||||
NumPresets: int(numPresets),
|
||||
NumPresetPrebuilds: int(numPresetPrebuilds),
|
||||
TemplateVersionJobTimeout: templateVersionJobTimeout,
|
||||
@@ -283,6 +290,11 @@ func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
|
||||
Description: "Skip cleanup (deletion test) and leave resources intact.",
|
||||
Value: serpent.BoolOf(&noCleanup),
|
||||
},
|
||||
{
|
||||
Flag: "provisioner-tag",
|
||||
Description: "Specify a set of tags to target provisioner daemons.",
|
||||
Value: serpent.StringArrayOf(&provisionerTags),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
|
||||
+45
-1
@@ -4,6 +4,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@@ -16,6 +19,29 @@ import (
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
// detectGitRef attempts to resolve the current git branch and remote
|
||||
// origin URL from the given working directory. These are sent to the
|
||||
// control plane so it can look up PR/diff status via the GitHub API
|
||||
// without SSHing into the workspace. Failures are silently ignored
|
||||
// since this is best-effort.
|
||||
func detectGitRef(workingDirectory string) (branch string, remoteOrigin string) {
|
||||
run := func(args ...string) string {
|
||||
//nolint:gosec
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
if workingDirectory != "" {
|
||||
cmd.Dir = workingDirectory
|
||||
}
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
branch = run("git", "rev-parse", "--abbrev-ref", "HEAD")
|
||||
remoteOrigin = run("git", "config", "--get", "remote.origin.url")
|
||||
return branch, remoteOrigin
|
||||
}
|
||||
|
||||
// gitAskpass is used by the Coder agent to automatically authenticate
|
||||
// with Git providers based on a hostname.
|
||||
func gitAskpass(agentAuth *AgentAuth) *serpent.Command {
|
||||
@@ -38,8 +64,20 @@ func gitAskpass(agentAuth *AgentAuth) *serpent.Command {
|
||||
return xerrors.Errorf("create agent client: %w", err)
|
||||
}
|
||||
|
||||
workingDirectory, err := os.Getwd()
|
||||
if err != nil {
|
||||
workingDirectory = ""
|
||||
}
|
||||
|
||||
// Detect the current git branch and remote origin so
|
||||
// the control plane can resolve diffs without needing
|
||||
// to SSH back into the workspace.
|
||||
gitBranch, gitRemoteOrigin := detectGitRef(workingDirectory)
|
||||
|
||||
token, err := client.ExternalAuth(ctx, agentsdk.ExternalAuthRequest{
|
||||
Match: host,
|
||||
Match: host,
|
||||
GitBranch: gitBranch,
|
||||
GitRemoteOrigin: gitRemoteOrigin,
|
||||
})
|
||||
if err != nil {
|
||||
var apiError *codersdk.Error
|
||||
@@ -58,6 +96,12 @@ func gitAskpass(agentAuth *AgentAuth) *serpent.Command {
|
||||
return xerrors.Errorf("get git token: %w", err)
|
||||
}
|
||||
if token.URL != "" {
|
||||
// This is to help the agent authenticate with Git.
|
||||
if inv.Environ.Get("CODER_CHAT_AGENT") == "true" {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, `You must notify the user to authenticate with Git.\n\nThe URL is: %s\n`, token.URL)
|
||||
return cliui.ErrCanceled
|
||||
}
|
||||
|
||||
if err := openURL(inv, token.URL); err == nil {
|
||||
cliui.Infof(inv.Stderr, "Your browser has been opened to authenticate with Git:\n%s", token.URL)
|
||||
} else {
|
||||
|
||||
+1
-5
@@ -106,11 +106,7 @@ func TestList(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
memberClient, member = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
sharedWorkspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
@@ -231,7 +232,7 @@ next:
|
||||
continue // immutables should not be passed to consecutive builds
|
||||
}
|
||||
|
||||
if len(tvp.Options) > 0 && !isValidTemplateParameterOption(buildParameter, tvp.Options) {
|
||||
if len(tvp.Options) > 0 && !isValidTemplateParameterOption(buildParameter, *tvp) {
|
||||
continue // do not propagate invalid options
|
||||
}
|
||||
|
||||
@@ -297,7 +298,7 @@ func (pr *ParameterResolver) verifyConstraints(resolved []codersdk.WorkspaceBuil
|
||||
return xerrors.Errorf("ephemeral parameter %q can be used only with --prompt-ephemeral-parameters or --ephemeral-parameter flag", r.Name)
|
||||
}
|
||||
|
||||
if !tvp.Mutable && action != WorkspaceCreate {
|
||||
if !tvp.Mutable && action != WorkspaceCreate && !pr.isFirstTimeUse(r.Name) {
|
||||
return xerrors.Errorf("parameter %q is immutable and cannot be updated", r.Name)
|
||||
}
|
||||
}
|
||||
@@ -365,7 +366,7 @@ func (pr *ParameterResolver) isLastBuildParameterInvalidOption(templateVersionPa
|
||||
|
||||
for _, buildParameter := range pr.lastBuildParameters {
|
||||
if buildParameter.Name == templateVersionParameter.Name {
|
||||
return !isValidTemplateParameterOption(buildParameter, templateVersionParameter.Options)
|
||||
return !isValidTemplateParameterOption(buildParameter, templateVersionParameter)
|
||||
}
|
||||
}
|
||||
return false
|
||||
@@ -389,8 +390,31 @@ func findWorkspaceBuildParameter(parameterName string, params []codersdk.Workspa
|
||||
return nil
|
||||
}
|
||||
|
||||
func isValidTemplateParameterOption(buildParameter codersdk.WorkspaceBuildParameter, options []codersdk.TemplateVersionParameterOption) bool {
|
||||
for _, opt := range options {
|
||||
func isValidTemplateParameterOption(buildParameter codersdk.WorkspaceBuildParameter, templateVersionParameter codersdk.TemplateVersionParameter) bool {
|
||||
// Multi-select parameters store values as a JSON array (e.g.
|
||||
// '["vim","emacs"]'), so we need to parse the array and validate
|
||||
// each element individually against the allowed options.
|
||||
if templateVersionParameter.Type == "list(string)" {
|
||||
var values []string
|
||||
if err := json.Unmarshal([]byte(buildParameter.Value), &values); err != nil {
|
||||
return false
|
||||
}
|
||||
for _, v := range values {
|
||||
found := false
|
||||
for _, opt := range templateVersionParameter.Options {
|
||||
if opt.Value == v {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
for _, opt := range templateVersionParameter.Options {
|
||||
if opt.Value == buildParameter.Value {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -0,0 +1,85 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func TestIsValidTemplateParameterOption(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := []codersdk.TemplateVersionParameterOption{
|
||||
{Name: "Vim", Value: "vim"},
|
||||
{Name: "Emacs", Value: "emacs"},
|
||||
{Name: "VS Code", Value: "vscode"},
|
||||
}
|
||||
|
||||
t.Run("SingleSelectValid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editor", Value: "vim"}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editor",
|
||||
Type: "string",
|
||||
Options: options,
|
||||
}
|
||||
assert.True(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
|
||||
t.Run("SingleSelectInvalid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editor", Value: "notepad"}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editor",
|
||||
Type: "string",
|
||||
Options: options,
|
||||
}
|
||||
assert.False(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
|
||||
t.Run("MultiSelectAllValid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editors", Value: `["vim","emacs"]`}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editors",
|
||||
Type: "list(string)",
|
||||
Options: options,
|
||||
}
|
||||
assert.True(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
|
||||
t.Run("MultiSelectOneInvalid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editors", Value: `["vim","notepad"]`}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editors",
|
||||
Type: "list(string)",
|
||||
Options: options,
|
||||
}
|
||||
assert.False(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
|
||||
t.Run("MultiSelectEmptyArray", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editors", Value: `[]`}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editors",
|
||||
Type: "list(string)",
|
||||
Options: options,
|
||||
}
|
||||
assert.True(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
|
||||
t.Run("MultiSelectInvalidJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
bp := codersdk.WorkspaceBuildParameter{Name: "editors", Value: `not-json`}
|
||||
tvp := codersdk.TemplateVersionParameter{
|
||||
Name: "editors",
|
||||
Type: "list(string)",
|
||||
Options: options,
|
||||
}
|
||||
assert.False(t, isValidTemplateParameterOption(bp, tvp))
|
||||
})
|
||||
}
|
||||
+120
-43
@@ -137,6 +137,15 @@ func createOIDCConfig(ctx context.Context, logger slog.Logger, vals *codersdk.De
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse oidc oauth callback url: %w", err)
|
||||
}
|
||||
|
||||
if vals.OIDC.RedirectURL.String() != "" {
|
||||
redirectURL, err = vals.OIDC.RedirectURL.Value().Parse("/api/v2/users/oidc/callback")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse oidc redirect url %q", err)
|
||||
}
|
||||
logger.Warn(ctx, "custom OIDC redirect URL used instead of 'access_url', ensure this matches the value configured in your OIDC provider")
|
||||
}
|
||||
|
||||
// If the scopes contain 'groups', we enable group support.
|
||||
// Do not override any custom value set by the user.
|
||||
if slice.Contains(vals.OIDC.Scopes, "groups") && vals.OIDC.GroupField == "" {
|
||||
@@ -608,28 +617,8 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
}
|
||||
}
|
||||
|
||||
extAuthEnv, err := ReadExternalAuthProvidersFromEnv(os.Environ())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read external auth providers from env: %w", err)
|
||||
}
|
||||
|
||||
promRegistry := prometheus.NewRegistry()
|
||||
oauthInstrument := promoauth.NewFactory(promRegistry)
|
||||
vals.ExternalAuthConfigs.Value = append(vals.ExternalAuthConfigs.Value, extAuthEnv...)
|
||||
externalAuthConfigs, err := externalauth.ConvertConfig(
|
||||
oauthInstrument,
|
||||
vals.ExternalAuthConfigs.Value,
|
||||
vals.AccessURL.Value(),
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("convert external auth config: %w", err)
|
||||
}
|
||||
for _, c := range externalAuthConfigs {
|
||||
logger.Debug(
|
||||
ctx, "loaded external auth config",
|
||||
slog.F("id", c.ID),
|
||||
)
|
||||
}
|
||||
|
||||
realIPConfig, err := httpmw.ParseRealIPConfig(vals.ProxyTrustedHeaders, vals.ProxyTrustedOrigins)
|
||||
if err != nil {
|
||||
@@ -660,7 +649,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
Pubsub: nil,
|
||||
CacheDir: cacheDir,
|
||||
GoogleTokenValidator: googleTokenValidator,
|
||||
ExternalAuthConfigs: externalAuthConfigs,
|
||||
ExternalAuthConfigs: nil,
|
||||
RealIPConfig: realIPConfig,
|
||||
SSHKeygenAlgorithm: sshKeygenAlgorithm,
|
||||
TracerProvider: tracerProvider,
|
||||
@@ -820,6 +809,40 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
return xerrors.Errorf("set deployment id: %w", err)
|
||||
}
|
||||
|
||||
extAuthEnv, err := ReadExternalAuthProvidersFromEnv(os.Environ())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read external auth providers from env: %w", err)
|
||||
}
|
||||
mergedExternalAuthProviders := append([]codersdk.ExternalAuthConfig{}, vals.ExternalAuthConfigs.Value...)
|
||||
mergedExternalAuthProviders = append(mergedExternalAuthProviders, extAuthEnv...)
|
||||
vals.ExternalAuthConfigs.Value = mergedExternalAuthProviders
|
||||
|
||||
mergedExternalAuthProviders, err = maybeAppendDefaultGithubExternalAuthProvider(
|
||||
ctx,
|
||||
options.Logger,
|
||||
options.Database,
|
||||
vals,
|
||||
mergedExternalAuthProviders,
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("maybe append default github external auth provider: %w", err)
|
||||
}
|
||||
|
||||
options.ExternalAuthConfigs, err = externalauth.ConvertConfig(
|
||||
oauthInstrument,
|
||||
mergedExternalAuthProviders,
|
||||
vals.AccessURL.Value(),
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("convert external auth config: %w", err)
|
||||
}
|
||||
for _, c := range options.ExternalAuthConfigs {
|
||||
logger.Debug(
|
||||
ctx, "loaded external auth config",
|
||||
slog.F("id", c.ID),
|
||||
)
|
||||
}
|
||||
|
||||
// Manage push notifications.
|
||||
experiments := coderd.ReadExperiments(options.Logger, options.DeploymentValues.Experiments.Value())
|
||||
if experiments.Enabled(codersdk.ExperimentWebPush) {
|
||||
@@ -1917,6 +1940,79 @@ type githubOAuth2ConfigParams struct {
|
||||
enterpriseBaseURL string
|
||||
}
|
||||
|
||||
func isDeploymentEligibleForGithubDefaultProvider(ctx context.Context, db database.Store) (bool, error) {
|
||||
// We want to enable the default provider only for new deployments, and avoid
|
||||
// enabling it if a deployment was upgraded from an older version.
|
||||
// nolint:gocritic // Requires system privileges
|
||||
defaultEligible, err := db.GetOAuth2GithubDefaultEligible(dbauthz.AsSystemRestricted(ctx))
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return false, xerrors.Errorf("get github default eligible: %w", err)
|
||||
}
|
||||
defaultEligibleNotSet := errors.Is(err, sql.ErrNoRows)
|
||||
|
||||
if defaultEligibleNotSet {
|
||||
// nolint:gocritic // User count requires system privileges
|
||||
userCount, err := db.GetUserCount(dbauthz.AsSystemRestricted(ctx), false)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("get user count: %w", err)
|
||||
}
|
||||
// We check if a deployment is new by checking if it has any users.
|
||||
defaultEligible = userCount == 0
|
||||
// nolint:gocritic // Requires system privileges
|
||||
if err := db.UpsertOAuth2GithubDefaultEligible(dbauthz.AsSystemRestricted(ctx), defaultEligible); err != nil {
|
||||
return false, xerrors.Errorf("upsert github default eligible: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return defaultEligible, nil
|
||||
}
|
||||
|
||||
func maybeAppendDefaultGithubExternalAuthProvider(
|
||||
ctx context.Context,
|
||||
logger slog.Logger,
|
||||
db database.Store,
|
||||
vals *codersdk.DeploymentValues,
|
||||
mergedExplicitProviders []codersdk.ExternalAuthConfig,
|
||||
) ([]codersdk.ExternalAuthConfig, error) {
|
||||
if !vals.ExternalAuthGithubDefaultProviderEnable.Value() {
|
||||
logger.Info(ctx, "default github external auth provider suppressed",
|
||||
slog.F("reason", "disabled by configuration"),
|
||||
slog.F("flag", "external-auth-github-default-provider-enable"),
|
||||
)
|
||||
return mergedExplicitProviders, nil
|
||||
}
|
||||
|
||||
if len(mergedExplicitProviders) > 0 {
|
||||
logger.Info(ctx, "default github external auth provider suppressed",
|
||||
slog.F("reason", "explicit external auth providers configured"),
|
||||
slog.F("provider_count", len(mergedExplicitProviders)),
|
||||
)
|
||||
return mergedExplicitProviders, nil
|
||||
}
|
||||
|
||||
defaultEligible, err := isDeploymentEligibleForGithubDefaultProvider(ctx, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !defaultEligible {
|
||||
logger.Info(ctx, "default github external auth provider suppressed",
|
||||
slog.F("reason", "deployment is not eligible"),
|
||||
)
|
||||
return mergedExplicitProviders, nil
|
||||
}
|
||||
|
||||
logger.Info(ctx, "injecting default github external auth provider",
|
||||
slog.F("type", codersdk.EnhancedExternalAuthProviderGitHub.String()),
|
||||
slog.F("client_id", GithubOAuth2DefaultProviderClientID),
|
||||
slog.F("device_flow", GithubOAuth2DefaultProviderDeviceFlow),
|
||||
)
|
||||
return append(mergedExplicitProviders, codersdk.ExternalAuthConfig{
|
||||
Type: codersdk.EnhancedExternalAuthProviderGitHub.String(),
|
||||
ClientID: GithubOAuth2DefaultProviderClientID,
|
||||
DeviceFlow: GithubOAuth2DefaultProviderDeviceFlow,
|
||||
}), nil
|
||||
}
|
||||
|
||||
func getGithubOAuth2ConfigParams(ctx context.Context, db database.Store, vals *codersdk.DeploymentValues) (*githubOAuth2ConfigParams, error) {
|
||||
params := githubOAuth2ConfigParams{
|
||||
accessURL: vals.AccessURL.Value(),
|
||||
@@ -1941,28 +2037,9 @@ func getGithubOAuth2ConfigParams(ctx context.Context, db database.Store, vals *c
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
// Check if the deployment is eligible for the default GitHub OAuth2 provider.
|
||||
// We want to enable it only for new deployments, and avoid enabling it
|
||||
// if a deployment was upgraded from an older version.
|
||||
// nolint:gocritic // Requires system privileges
|
||||
defaultEligible, err := db.GetOAuth2GithubDefaultEligible(dbauthz.AsSystemRestricted(ctx))
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, xerrors.Errorf("get github default eligible: %w", err)
|
||||
}
|
||||
defaultEligibleNotSet := errors.Is(err, sql.ErrNoRows)
|
||||
|
||||
if defaultEligibleNotSet {
|
||||
// nolint:gocritic // User count requires system privileges
|
||||
userCount, err := db.GetUserCount(dbauthz.AsSystemRestricted(ctx), false)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get user count: %w", err)
|
||||
}
|
||||
// We check if a deployment is new by checking if it has any users.
|
||||
defaultEligible = userCount == 0
|
||||
// nolint:gocritic // Requires system privileges
|
||||
if err := db.UpsertOAuth2GithubDefaultEligible(dbauthz.AsSystemRestricted(ctx), defaultEligible); err != nil {
|
||||
return nil, xerrors.Errorf("upsert github default eligible: %w", err)
|
||||
}
|
||||
defaultEligible, err := isDeploymentEligibleForGithubDefaultProvider(ctx, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !defaultEligible {
|
||||
|
||||
@@ -53,6 +53,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/migrations"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/telemetry"
|
||||
"github.com/coder/coder/v2/coderd/userpassword"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
@@ -302,6 +303,7 @@ func TestServer(t *testing.T) {
|
||||
"open install.sh: file does not exist",
|
||||
"telemetry disabled, unable to notify of security issues",
|
||||
"installed terraform version newer than expected",
|
||||
"report generator",
|
||||
}
|
||||
|
||||
countLines := func(fullOutput string) int {
|
||||
@@ -1740,6 +1742,18 @@ func TestServer(t *testing.T) {
|
||||
|
||||
// Next, we instruct the same server to display the YAML config
|
||||
// and then save it.
|
||||
// Because this is literally the same invocation, DefaultFn sets the
|
||||
// value of 'Default'. Which triggers a mutually exclusive error
|
||||
// on the next parse.
|
||||
// Usually we only parse flags once, so this is not an issue
|
||||
for _, c := range inv.Command.Children {
|
||||
if c.Name() == "server" {
|
||||
for i := range c.Options {
|
||||
c.Options[i].DefaultFn = nil
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
inv = inv.WithContext(testutil.Context(t, testutil.WaitMedium))
|
||||
//nolint:gocritic
|
||||
inv.Args = append(args, "--write-config")
|
||||
@@ -1793,6 +1807,155 @@ func TestServer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:tparallel,paralleltest // This test sets environment variables.
|
||||
func TestServer_ExternalAuthGitHubDefaultProvider(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
args []string
|
||||
env map[string]string
|
||||
createUserPreStart bool
|
||||
expectedProviders []string
|
||||
}
|
||||
|
||||
run := func(t *testing.T, tc testCase) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
unsetPrefixedEnv := func(prefix string) {
|
||||
t.Helper()
|
||||
for _, envVar := range os.Environ() {
|
||||
envKey, _, found := strings.Cut(envVar, "=")
|
||||
if !found || !strings.HasPrefix(envKey, prefix) {
|
||||
continue
|
||||
}
|
||||
value, had := os.LookupEnv(envKey)
|
||||
require.True(t, had)
|
||||
require.NoError(t, os.Unsetenv(envKey))
|
||||
keyCopy := envKey
|
||||
valueCopy := value
|
||||
t.Cleanup(func() {
|
||||
// This is for setting/unsetting a number of prefixed env vars.
|
||||
// t.Setenv doesn't cover this use case.
|
||||
// nolint:usetesting
|
||||
_ = os.Setenv(keyCopy, valueCopy)
|
||||
})
|
||||
}
|
||||
}
|
||||
unsetPrefixedEnv("CODER_EXTERNAL_AUTH_")
|
||||
unsetPrefixedEnv("CODER_GITAUTH_")
|
||||
|
||||
dbURL, err := dbtestutil.Open(t)
|
||||
require.NoError(t, err)
|
||||
db, _ := dbtestutil.NewDB(t, dbtestutil.WithURL(dbURL))
|
||||
|
||||
const (
|
||||
existingUserEmail = "existing-user@coder.com"
|
||||
existingUserUsername = "existing-user"
|
||||
existingUserPassword = "SomeSecurePassword!"
|
||||
)
|
||||
if tc.createUserPreStart {
|
||||
hashedPassword, err := userpassword.Hash(existingUserPassword)
|
||||
require.NoError(t, err)
|
||||
_ = dbgen.User(t, db, database.User{
|
||||
Email: existingUserEmail,
|
||||
Username: existingUserUsername,
|
||||
HashedPassword: []byte(hashedPassword),
|
||||
})
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"server",
|
||||
"--postgres-url", dbURL,
|
||||
"--http-address", ":0",
|
||||
"--access-url", "https://example.com",
|
||||
}
|
||||
args = append(args, tc.args...)
|
||||
|
||||
inv, cfg := clitest.New(t, args...)
|
||||
for envKey, value := range tc.env {
|
||||
t.Setenv(envKey, value)
|
||||
}
|
||||
clitest.Start(t, inv)
|
||||
|
||||
accessURL := waitAccessURL(t, cfg)
|
||||
client := codersdk.New(accessURL)
|
||||
|
||||
if tc.createUserPreStart {
|
||||
loginResp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{
|
||||
Email: existingUserEmail,
|
||||
Password: existingUserPassword,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
client.SetSessionToken(loginResp.SessionToken)
|
||||
} else {
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
}
|
||||
|
||||
externalAuthResp, err := client.ListExternalAuths(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotProviders := map[string]codersdk.ExternalAuthLinkProvider{}
|
||||
for _, provider := range externalAuthResp.Providers {
|
||||
gotProviders[provider.ID] = provider
|
||||
}
|
||||
require.Len(t, gotProviders, len(tc.expectedProviders))
|
||||
|
||||
for _, providerID := range tc.expectedProviders {
|
||||
provider, ok := gotProviders[providerID]
|
||||
require.Truef(t, ok, "expected provider %q to be configured", providerID)
|
||||
if providerID == codersdk.EnhancedExternalAuthProviderGitHub.String() {
|
||||
require.Equal(t, codersdk.EnhancedExternalAuthProviderGitHub.String(), provider.Type)
|
||||
require.True(t, provider.Device)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range []testCase{
|
||||
{
|
||||
name: "NewDeployment_NoExplicitProviders_InjectsDefaultGithub",
|
||||
expectedProviders: []string{codersdk.EnhancedExternalAuthProviderGitHub.String()},
|
||||
},
|
||||
{
|
||||
name: "ExistingDeployment_DoesNotInjectDefaultGithub",
|
||||
createUserPreStart: true,
|
||||
expectedProviders: nil,
|
||||
},
|
||||
{
|
||||
name: "DefaultProviderDisabled_DoesNotInjectDefaultGithub",
|
||||
args: []string{
|
||||
"--external-auth-github-default-provider-enable=false",
|
||||
},
|
||||
expectedProviders: nil,
|
||||
},
|
||||
{
|
||||
name: "ExplicitProviderViaConfig_DoesNotInjectDefaultGithub",
|
||||
args: []string{
|
||||
`--external-auth-providers=[{"type":"gitlab","client_id":"config-client-id"}]`,
|
||||
},
|
||||
expectedProviders: []string{codersdk.EnhancedExternalAuthProviderGitLab.String()},
|
||||
},
|
||||
{
|
||||
name: "ExplicitProviderViaEnv_DoesNotInjectDefaultGithub",
|
||||
env: map[string]string{
|
||||
"CODER_EXTERNAL_AUTH_0_TYPE": codersdk.EnhancedExternalAuthProviderGitLab.String(),
|
||||
"CODER_EXTERNAL_AUTH_0_CLIENT_ID": "env-client-id",
|
||||
},
|
||||
expectedProviders: []string{codersdk.EnhancedExternalAuthProviderGitLab.String()},
|
||||
},
|
||||
{
|
||||
name: "ExplicitProviderViaLegacyEnv_DoesNotInjectDefaultGithub",
|
||||
env: map[string]string{
|
||||
"CODER_GITAUTH_0_TYPE": codersdk.EnhancedExternalAuthProviderGitLab.String(),
|
||||
"CODER_GITAUTH_0_CLIENT_ID": "legacy-env-client-id",
|
||||
},
|
||||
expectedProviders: []string{codersdk.EnhancedExternalAuthProviderGitLab.String()},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:tparallel,paralleltest // This test sets environment variables.
|
||||
func TestServer_Logging_NoParallel(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
+7
-31
@@ -25,11 +25,7 @@ func TestSharingShare(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -68,12 +64,8 @@ func TestSharingShare(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -127,11 +119,7 @@ func TestSharingShare(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -182,11 +170,7 @@ func TestSharingStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -230,11 +214,7 @@ func TestSharingRemove(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -291,11 +271,7 @@ func TestSharingRemove(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
|
||||
dv.Experiments = []string{string(codersdk.ExperimentWorkspaceSharing)}
|
||||
}),
|
||||
})
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
orgOwner = coderdtest.CreateFirstUser(t, client)
|
||||
workspaceOwnerClient, workspaceOwner = coderdtest.CreateAnotherUser(t, client, orgOwner.OrganizationID, rbac.ScopedRoleOrgAuditor(orgOwner.OrganizationID))
|
||||
workspace = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
|
||||
+1
-1
@@ -120,7 +120,7 @@ func (r *RootCmd) start() *serpent.Command {
|
||||
func buildWorkspaceStartRequest(inv *serpent.Invocation, client *codersdk.Client, workspace codersdk.Workspace, parameterFlags workspaceParameterFlags, buildFlags buildFlags, action WorkspaceCLIAction) (codersdk.CreateWorkspaceBuildRequest, error) {
|
||||
version := workspace.LatestBuild.TemplateVersionID
|
||||
|
||||
if workspace.AutomaticUpdates == codersdk.AutomaticUpdatesAlways || action == WorkspaceUpdate {
|
||||
if workspace.AutomaticUpdates == codersdk.AutomaticUpdatesAlways || workspace.TemplateRequireActiveVersion || action == WorkspaceUpdate {
|
||||
version = workspace.TemplateActiveVersionID
|
||||
if version != workspace.LatestBuild.TemplateVersionID {
|
||||
action = WorkspaceUpdate
|
||||
|
||||
+4
-4
@@ -33,7 +33,7 @@ func TestStatePull(t *testing.T) {
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: taUser.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{ProvisionerState: wantState}).
|
||||
Seed(database.WorkspaceBuild{}).ProvisionerState(wantState).
|
||||
Do()
|
||||
statefilePath := filepath.Join(t.TempDir(), "state")
|
||||
inv, root := clitest.New(t, "state", "pull", r.Workspace.Name, statefilePath)
|
||||
@@ -54,7 +54,7 @@ func TestStatePull(t *testing.T) {
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: taUser.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{ProvisionerState: wantState}).
|
||||
Seed(database.WorkspaceBuild{}).ProvisionerState(wantState).
|
||||
Do()
|
||||
inv, root := clitest.New(t, "state", "pull", r.Workspace.Name)
|
||||
var gotState bytes.Buffer
|
||||
@@ -74,7 +74,7 @@ func TestStatePull(t *testing.T) {
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: taUser.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{ProvisionerState: wantState}).
|
||||
Seed(database.WorkspaceBuild{}).ProvisionerState(wantState).
|
||||
Do()
|
||||
inv, root := clitest.New(t, "state", "pull", taUser.Username+"/"+r.Workspace.Name,
|
||||
"--build", fmt.Sprintf("%d", r.Build.BuildNumber))
|
||||
@@ -170,7 +170,7 @@ func TestStatePush(t *testing.T) {
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: taUser.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{ProvisionerState: initialState}).
|
||||
Seed(database.WorkspaceBuild{}).ProvisionerState(initialState).
|
||||
Do()
|
||||
wantState := []byte("updated state")
|
||||
stateFile, err := os.CreateTemp(t.TempDir(), "")
|
||||
|
||||
+9
-7
@@ -1,5 +1,3 @@
|
||||
//go:build !windows
|
||||
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
@@ -7,6 +5,7 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -25,12 +24,15 @@ func setupSocketServer(t *testing.T) (path string, cleanup func()) {
|
||||
t.Helper()
|
||||
|
||||
// Use a temporary socket path for each test
|
||||
socketPath := filepath.Join(testutil.TempDirUnixSocket(t), "test.sock")
|
||||
socketPath := testutil.AgentSocketPath(t)
|
||||
|
||||
// Create parent directory if needed
|
||||
parentDir := filepath.Dir(socketPath)
|
||||
err := os.MkdirAll(parentDir, 0o700)
|
||||
require.NoError(t, err, "create socket directory")
|
||||
// Create parent directory if needed. Not necessary on Windows because named pipes live in an abstract namespace
|
||||
// not tied to any real files.
|
||||
if runtime.GOOS != "windows" {
|
||||
parentDir := filepath.Dir(socketPath)
|
||||
err := os.MkdirAll(parentDir, 0o700)
|
||||
require.NoError(t, err, "create socket directory")
|
||||
}
|
||||
|
||||
server, err := agentsocket.NewServer(
|
||||
slog.Make().Leveled(slog.LevelDebug),
|
||||
|
||||
@@ -18,6 +18,7 @@ func (r *RootCmd) tasksCommand() *serpent.Command {
|
||||
r.taskList(),
|
||||
r.taskLogs(),
|
||||
r.taskPause(),
|
||||
r.taskResume(),
|
||||
r.taskSend(),
|
||||
r.taskStatus(),
|
||||
},
|
||||
|
||||
@@ -0,0 +1,95 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/pretty"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) taskResume() *serpent.Command {
|
||||
var noWait bool
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "resume <task>",
|
||||
Short: "Resume a task",
|
||||
Long: FormatExamples(
|
||||
Example{
|
||||
Description: "Resume a task by name",
|
||||
Command: "coder task resume my-task",
|
||||
},
|
||||
Example{
|
||||
Description: "Resume another user's task",
|
||||
Command: "coder task resume alice/my-task",
|
||||
},
|
||||
Example{
|
||||
Description: "Resume a task without confirmation",
|
||||
Command: "coder task resume my-task --yes",
|
||||
},
|
||||
),
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
Options: serpent.OptionSet{
|
||||
{
|
||||
Flag: "no-wait",
|
||||
Description: "Return immediately after resuming the task.",
|
||||
Value: serpent.BoolOf(&noWait),
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
task, err := client.TaskByIdentifier(ctx, inv.Args[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", inv.Args[0], err)
|
||||
}
|
||||
|
||||
display := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
|
||||
|
||||
if task.Status == codersdk.TaskStatusError || task.Status == codersdk.TaskStatusUnknown {
|
||||
return xerrors.Errorf("task %q is in %s state and cannot be resumed; check the workspace build logs and agent status for details", display, task.Status)
|
||||
} else if task.Status != codersdk.TaskStatusPaused {
|
||||
return xerrors.Errorf("task %q cannot be resumed (current status: %s)", display, task.Status)
|
||||
}
|
||||
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Resume task %s?", pretty.Sprint(cliui.DefaultStyles.Code, display)),
|
||||
IsConfirm: true,
|
||||
Default: cliui.ConfirmNo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := client.ResumeTask(ctx, task.OwnerName, task.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resume task %q: %w", display, err)
|
||||
} else if resp.WorkspaceBuild == nil {
|
||||
return xerrors.Errorf("resume task %q: no workspace build returned", display)
|
||||
}
|
||||
|
||||
if noWait {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Resuming task %q in the background.\n", cliui.Keyword(display))
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = cliui.WorkspaceBuild(ctx, inv.Stdout, client, resp.WorkspaceBuild.ID); err != nil {
|
||||
return xerrors.Errorf("watch resume build for task %q: %w", display, err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s task has been resumed.\n", cliui.Keyword(display))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
@@ -0,0 +1,183 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestExpTaskResume(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// pauseTask is a helper that pauses a task and waits for the stop
|
||||
// build to complete.
|
||||
pauseTask := func(ctx context.Context, t *testing.T, client *codersdk.Client, task codersdk.Task) {
|
||||
t.Helper()
|
||||
|
||||
pauseResp, err := client.PauseTask(ctx, task.OwnerName, task.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pauseResp.WorkspaceBuild)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, pauseResp.WorkspaceBuild.ID)
|
||||
}
|
||||
|
||||
t.Run("WithYesFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A paused task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
pauseTask(setupCtx, t, userClient, task)
|
||||
|
||||
// When: We attempt to resume the task
|
||||
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// Then: We expect the task to be resumed
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, output.Stdout(), "has been resumed")
|
||||
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
|
||||
})
|
||||
|
||||
// OtherUserTask verifies that an admin can resume a task owned by
|
||||
// another user using the "owner/name" identifier format.
|
||||
t.Run("OtherUserTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A different user's paused task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
adminClient, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
pauseTask(setupCtx, t, userClient, task)
|
||||
|
||||
// When: We attempt to resume their task
|
||||
identifier := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
|
||||
inv, root := clitest.New(t, "task", "resume", identifier, "--yes")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, adminClient, root)
|
||||
|
||||
// Then: We expect the task to be resumed
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, output.Stdout(), "has been resumed")
|
||||
|
||||
updated, err := adminClient.TaskByIdentifier(ctx, identifier)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("NoWait", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A paused task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
pauseTask(setupCtx, t, userClient, task)
|
||||
|
||||
// When: We attempt to resume the task (and specify no wait)
|
||||
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes", "--no-wait")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// Then: We expect the task to be resumed in the background
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, output.Stdout(), "in the background")
|
||||
|
||||
// And: The task to eventually be resumed
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
ws := coderdtest.MustWorkspace(t, userClient, task.WorkspaceID.UUID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, ws.LatestBuild.ID)
|
||||
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("PromptConfirm", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A paused task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
pauseTask(setupCtx, t, userClient, task)
|
||||
|
||||
// When: We attempt to resume the task
|
||||
inv, root := clitest.New(t, "task", "resume", task.Name)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// And: We confirm we want to resume the task
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
inv = inv.WithContext(ctx)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
pty.ExpectMatchContext(ctx, "Resume task")
|
||||
pty.WriteLine("yes")
|
||||
|
||||
// Then: We expect the task to be resumed
|
||||
pty.ExpectMatchContext(ctx, "has been resumed")
|
||||
require.NoError(t, w.Wait())
|
||||
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusInitializing, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("PromptDecline", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A paused task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
pauseTask(setupCtx, t, userClient, task)
|
||||
|
||||
// When: We attempt to resume the task
|
||||
inv, root := clitest.New(t, "task", "resume", task.Name)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// But: Say no at the confirmation screen
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
inv = inv.WithContext(ctx)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
pty.ExpectMatchContext(ctx, "Resume task")
|
||||
pty.WriteLine("no")
|
||||
require.Error(t, w.Wait())
|
||||
|
||||
// Then: We expect the task to still be paused
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("TaskNotPaused", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// When: We attempt to resume the task that is not paused
|
||||
inv, root := clitest.New(t, "task", "resume", task.Name, "--yes")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// Then: We expect to get an error that the task is not paused
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "cannot be resumed")
|
||||
})
|
||||
}
|
||||
@@ -137,6 +137,23 @@ func Test_Tasks(t *testing.T) {
|
||||
require.Equal(t, codersdk.TaskStatusPaused, task.Status, "task should be paused")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "resume task",
|
||||
cmdArgs: []string{"task", "resume", taskName, "--yes"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
require.Contains(t, stdout, "has been resumed", "resume output should confirm task was resumed")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get task status after resume",
|
||||
cmdArgs: []string{"task", "status", taskName, "--output", "json"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
var task codersdk.Task
|
||||
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
|
||||
require.Equal(t, taskName, task.Name, "task name should match")
|
||||
require.Equal(t, codersdk.TaskStatusInitializing, task.Status, "task should be initializing after resume")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delete task",
|
||||
cmdArgs: []string{"task", "delete", taskName, "--yes"},
|
||||
|
||||
@@ -139,8 +139,10 @@ func (r *RootCmd) templateVersionsList() *serpent.Command {
|
||||
type templateVersionRow struct {
|
||||
// For json format:
|
||||
TemplateVersion codersdk.TemplateVersion `table:"-"`
|
||||
ActiveJSON bool `json:"active" table:"-"`
|
||||
|
||||
// For table format:
|
||||
ID string `json:"-" table:"id"`
|
||||
Name string `json:"-" table:"name,default_sort"`
|
||||
CreatedAt time.Time `json:"-" table:"created at"`
|
||||
CreatedBy string `json:"-" table:"created by"`
|
||||
@@ -166,6 +168,8 @@ func templateVersionsToRows(activeVersionID uuid.UUID, templateVersions ...coder
|
||||
|
||||
rows[i] = templateVersionRow{
|
||||
TemplateVersion: templateVersion,
|
||||
ActiveJSON: templateVersion.ID == activeVersionID,
|
||||
ID: templateVersion.ID.String(),
|
||||
Name: templateVersion.Name,
|
||||
CreatedAt: templateVersion.CreatedAt,
|
||||
CreatedBy: templateVersion.CreatedBy.Username,
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -40,6 +42,33 @@ func TestTemplateVersions(t *testing.T) {
|
||||
pty.ExpectMatch(version.CreatedBy.Username)
|
||||
pty.ExpectMatch("Active")
|
||||
})
|
||||
|
||||
t.Run("ListVersionsJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
inv, root := clitest.New(t, "templates", "versions", "list", template.Name, "--output", "json")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
inv.Stdout = &stdout
|
||||
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
var rows []struct {
|
||||
TemplateVersion codersdk.TemplateVersion `json:"TemplateVersion"`
|
||||
Active bool `json:"active"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(stdout.Bytes(), &rows))
|
||||
require.Len(t, rows, 1)
|
||||
assert.Equal(t, version.ID, rows[0].TemplateVersion.ID)
|
||||
assert.True(t, rows[0].Active)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTemplateVersionsPromote(t *testing.T) {
|
||||
|
||||
+13
-5
@@ -49,10 +49,9 @@ OPTIONS:
|
||||
security purposes if a --wildcard-access-url is configured.
|
||||
|
||||
--disable-workspace-sharing bool, $CODER_DISABLE_WORKSPACE_SHARING
|
||||
Disable workspace sharing (requires the "workspace-sharing" experiment
|
||||
to be enabled). Workspace ACL checking is disabled and only owners can
|
||||
have ssh, apps and terminal access to workspaces. Access based on the
|
||||
'owner' role is also allowed unless disabled via
|
||||
Disable workspace sharing. Workspace ACL checking is disabled and only
|
||||
owners can have ssh, apps and terminal access to workspaces. Access
|
||||
based on the 'owner' role is also allowed unless disabled via
|
||||
--disable-owner-workspace-access.
|
||||
|
||||
--swagger-enable bool, $CODER_SWAGGER_ENABLE
|
||||
@@ -63,6 +62,9 @@ OPTIONS:
|
||||
Separate multiple experiments with commas, or enter '*' to opt-in to
|
||||
all available experiments.
|
||||
|
||||
--external-auth-github-default-provider-enable bool, $CODER_EXTERNAL_AUTH_GITHUB_DEFAULT_PROVIDER_ENABLE (default: true)
|
||||
Enable the default GitHub external auth provider managed by Coder.
|
||||
|
||||
--postgres-auth password|awsiamrds, $CODER_PG_AUTH (default: password)
|
||||
Type of auth to use when connecting to postgres. For AWS RDS, using
|
||||
IAM authentication (awsiamrds) is recommended.
|
||||
@@ -383,13 +385,19 @@ NETWORKING OPTIONS:
|
||||
--samesite-auth-cookie lax|none, $CODER_SAMESITE_AUTH_COOKIE (default: lax)
|
||||
Controls the 'SameSite' property is set on browser session cookies.
|
||||
|
||||
--secure-auth-cookie bool, $CODER_SECURE_AUTH_COOKIE
|
||||
--secure-auth-cookie bool, $CODER_SECURE_AUTH_COOKIE (default: false)
|
||||
Controls if the 'Secure' property is set on browser session cookies.
|
||||
|
||||
--wildcard-access-url string, $CODER_WILDCARD_ACCESS_URL
|
||||
Specifies the wildcard hostname to use for workspace applications in
|
||||
the form "*.example.com".
|
||||
|
||||
--host-prefix-cookie bool, $CODER_HOST_PREFIX_COOKIE (default: false)
|
||||
Recommended to be enabled. Enables `__Host-` prefix for cookies to
|
||||
guarantee they are only set by the right domain. This change is
|
||||
disruptive to any workspaces built before release 1.31, requiring a
|
||||
workspace restart.
|
||||
|
||||
NETWORKING / DERP OPTIONS:
|
||||
Most Coder deployments never have to think about DERP because all connections
|
||||
between workspaces and users are peer-to-peer. However, when Coder cannot
|
||||
|
||||
+1
@@ -13,6 +13,7 @@ SUBCOMMANDS:
|
||||
list List tasks
|
||||
logs Show a task's logs
|
||||
pause Pause a task
|
||||
resume Resume a task
|
||||
send Send input to a task
|
||||
status Show the status of a task.
|
||||
|
||||
|
||||
+28
@@ -0,0 +1,28 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder task resume [flags] <task>
|
||||
|
||||
Resume a task
|
||||
|
||||
- Resume a task by name:
|
||||
|
||||
$ coder task resume my-task
|
||||
|
||||
- Resume another user's task:
|
||||
|
||||
$ coder task resume alice/my-task
|
||||
|
||||
- Resume a task without confirmation:
|
||||
|
||||
$ coder task resume my-task --yes
|
||||
|
||||
OPTIONS:
|
||||
--no-wait bool
|
||||
Return immediately after resuming the task.
|
||||
|
||||
-y, --yes bool
|
||||
Bypass confirmation prompts.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
@@ -9,7 +9,7 @@ OPTIONS:
|
||||
-O, --org string, $CODER_ORGANIZATION
|
||||
Select which organization (uuid or name) to use.
|
||||
|
||||
-c, --column [name|created at|created by|status|active|archived] (default: name,created at,created by,status,active)
|
||||
-c, --column [id|name|created at|created by|status|active|archived] (default: name,created at,created by,status,active)
|
||||
Columns to display in table output.
|
||||
|
||||
--include-archived bool
|
||||
|
||||
+1
-1
@@ -27,7 +27,7 @@ USAGE:
|
||||
SUBCOMMANDS:
|
||||
create Create a token
|
||||
list List tokens
|
||||
remove Delete a token
|
||||
remove Expire or delete a token
|
||||
view Display detailed information about a token
|
||||
|
||||
———
|
||||
|
||||
@@ -15,6 +15,10 @@ OPTIONS:
|
||||
-c, --column [id|name|scopes|allow list|last used|expires at|created at|owner] (default: id,name,scopes,allow list,last used,expires at,created at)
|
||||
Columns to display in table output.
|
||||
|
||||
--include-expired bool
|
||||
Include expired tokens in the output. By default, expired tokens are
|
||||
hidden.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
|
||||
+10
-2
@@ -1,11 +1,19 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder tokens remove <name|id|token>
|
||||
coder tokens remove [flags] <name|id|token>
|
||||
|
||||
Delete a token
|
||||
Expire or delete a token
|
||||
|
||||
Aliases: delete, rm
|
||||
|
||||
Remove a token by expiring it. Use --delete to permanently hard-delete the
|
||||
token instead.
|
||||
|
||||
OPTIONS:
|
||||
--delete bool
|
||||
Permanently delete the token instead of expiring it. This removes the
|
||||
audit trail.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+18
-5
@@ -176,11 +176,16 @@ networking:
|
||||
# (default: <unset>, type: string-array)
|
||||
proxyTrustedOrigins: []
|
||||
# Controls if the 'Secure' property is set on browser session cookies.
|
||||
# (default: <unset>, type: bool)
|
||||
# (default: false, type: bool)
|
||||
secureAuthCookie: false
|
||||
# Controls the 'SameSite' property is set on browser session cookies.
|
||||
# (default: lax, type: enum[lax\|none])
|
||||
sameSiteAuthCookie: lax
|
||||
# Recommended to be enabled. Enables `__Host-` prefix for cookies to guarantee
|
||||
# they are only set by the right domain. This change is disruptive to any
|
||||
# workspaces built before release 1.31, requiring a workspace restart.
|
||||
# (default: false, type: bool)
|
||||
hostPrefixCookie: false
|
||||
# Whether Coder only allows connections to workspaces via the browser.
|
||||
# (default: <unset>, type: bool)
|
||||
browserOnly: false
|
||||
@@ -417,6 +422,11 @@ oidc:
|
||||
# an insecure OIDC configuration. It is not recommended to use this flag.
|
||||
# (default: <unset>, type: bool)
|
||||
dangerousSkipIssuerChecks: false
|
||||
# Optional override of the default redirect url which uses the deployment's access
|
||||
# url. Useful in situations where a deployment has more than 1 domain. Using this
|
||||
# setting can also break OIDC, so use with caution.
|
||||
# (default: <unset>, type: url)
|
||||
oidc-redirect-url:
|
||||
# Telemetry is critical to our ability to improve Coder. We strip all personal
|
||||
# information before sending data to our servers. Please only disable telemetry
|
||||
# when required by your organization's security policy.
|
||||
@@ -514,10 +524,10 @@ disablePathApps: false
|
||||
# workspaces.
|
||||
# (default: <unset>, type: bool)
|
||||
disableOwnerWorkspaceAccess: false
|
||||
# Disable workspace sharing (requires the "workspace-sharing" experiment to be
|
||||
# enabled). Workspace ACL checking is disabled and only owners can have ssh, apps
|
||||
# and terminal access to workspaces. Access based on the 'owner' role is also
|
||||
# allowed unless disabled via --disable-owner-workspace-access.
|
||||
# Disable workspace sharing. Workspace ACL checking is disabled and only owners
|
||||
# can have ssh, apps and terminal access to workspaces. Access based on the
|
||||
# 'owner' role is also allowed unless disabled via
|
||||
# --disable-owner-workspace-access.
|
||||
# (default: <unset>, type: bool)
|
||||
disableWorkspaceSharing: false
|
||||
# These options change the behavior of how clients interact with the Coder.
|
||||
@@ -554,6 +564,9 @@ supportLinks: []
|
||||
# External Authentication providers.
|
||||
# (default: <unset>, type: struct[[]codersdk.ExternalAuthConfig])
|
||||
externalAuthProviders: []
|
||||
# Enable the default GitHub external auth provider managed by Coder.
|
||||
# (default: true, type: bool)
|
||||
externalAuthGithubDefaultProviderEnable: true
|
||||
# Hostname of HTTPS server that runs https://github.com/coder/wgtunnel. By
|
||||
# default, this will pick the best available wgtunnel server hosted by Coder. e.g.
|
||||
# "tunnel.example.com".
|
||||
|
||||
+37
-14
@@ -218,9 +218,10 @@ func (r *RootCmd) listTokens() *serpent.Command {
|
||||
}
|
||||
|
||||
var (
|
||||
all bool
|
||||
displayTokens []tokenListRow
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
all bool
|
||||
includeExpired bool
|
||||
displayTokens []tokenListRow
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]tokenListRow{}, defaultCols),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
@@ -240,7 +241,8 @@ func (r *RootCmd) listTokens() *serpent.Command {
|
||||
}
|
||||
|
||||
tokens, err := client.Tokens(inv.Context(), codersdk.Me, codersdk.TokensFilter{
|
||||
IncludeAll: all,
|
||||
IncludeAll: all,
|
||||
IncludeExpired: includeExpired,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("list tokens: %w", err)
|
||||
@@ -274,6 +276,12 @@ func (r *RootCmd) listTokens() *serpent.Command {
|
||||
Description: "Specifies whether all users' tokens will be listed or not (must have Owner role to see all tokens).",
|
||||
Value: serpent.BoolOf(&all),
|
||||
},
|
||||
{
|
||||
Name: "include-expired",
|
||||
Flag: "include-expired",
|
||||
Description: "Include expired tokens in the output. By default, expired tokens are hidden.",
|
||||
Value: serpent.BoolOf(&includeExpired),
|
||||
},
|
||||
}
|
||||
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
@@ -323,10 +331,13 @@ func (r *RootCmd) viewToken() *serpent.Command {
|
||||
}
|
||||
|
||||
func (r *RootCmd) removeToken() *serpent.Command {
|
||||
var deleteToken bool
|
||||
cmd := &serpent.Command{
|
||||
Use: "remove <name|id|token>",
|
||||
Aliases: []string{"delete"},
|
||||
Short: "Delete a token",
|
||||
Short: "Expire or delete a token",
|
||||
Long: "Remove a token by expiring it. Use --delete to permanently hard-" +
|
||||
"delete the token instead.",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
@@ -338,7 +349,7 @@ func (r *RootCmd) removeToken() *serpent.Command {
|
||||
|
||||
token, err := client.APIKeyByName(inv.Context(), codersdk.Me, inv.Args[0])
|
||||
if err != nil {
|
||||
// If it's a token, we need to extract the ID
|
||||
// If it's a token, we need to extract the ID.
|
||||
maybeID := strings.Split(inv.Args[0], "-")[0]
|
||||
token, err = client.APIKeyByID(inv.Context(), codersdk.Me, maybeID)
|
||||
if err != nil {
|
||||
@@ -346,19 +357,31 @@ func (r *RootCmd) removeToken() *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
err = client.DeleteAPIKey(inv.Context(), codersdk.Me, token.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("delete api key: %w", err)
|
||||
if deleteToken {
|
||||
err = client.DeleteAPIKey(inv.Context(), codersdk.Me, token.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("delete api key: %w", err)
|
||||
}
|
||||
cliui.Infof(inv.Stdout, "Token has been deleted.")
|
||||
return nil
|
||||
}
|
||||
|
||||
cliui.Infof(
|
||||
inv.Stdout,
|
||||
"Token has been deleted.",
|
||||
)
|
||||
|
||||
err = client.ExpireAPIKey(inv.Context(), codersdk.Me, token.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("expire api key: %w", err)
|
||||
}
|
||||
cliui.Infof(inv.Stdout, "Token has been expired.")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = serpent.OptionSet{
|
||||
{
|
||||
Flag: "delete",
|
||||
Description: "Permanently delete the token instead of expiring it. This removes the audit trail.",
|
||||
Value: serpent.BoolOf(&deleteToken),
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
+144
-8
@@ -6,12 +6,16 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -22,7 +26,7 @@ func TestTokens(t *testing.T) {
|
||||
adminUser := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
secondUserClient, secondUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
|
||||
_, thirdUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
|
||||
thirdUserClient, thirdUser := coderdtest.CreateAnotherUser(t, client, adminUser.OrganizationID)
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancelFunc()
|
||||
@@ -155,7 +159,7 @@ func TestTokens(t *testing.T) {
|
||||
require.Len(t, scopedToken.AllowList, 1)
|
||||
require.Equal(t, allowSpec, scopedToken.AllowList[0].String())
|
||||
|
||||
// Delete by name
|
||||
// Delete by name (default behavior is now expire)
|
||||
inv, root = clitest.New(t, "tokens", "rm", "token-one")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
@@ -164,10 +168,31 @@ func TestTokens(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
require.Contains(t, res, "expired")
|
||||
|
||||
// Delete by ID
|
||||
// Regular users cannot expire other users' tokens (expire is default now).
|
||||
inv, root = clitest.New(t, "tokens", "rm", secondTokenID)
|
||||
clitest.SetupConfig(t, thirdUserClient, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "not found")
|
||||
|
||||
// Only admin users can expire other users' tokens (expire is default now).
|
||||
inv, root = clitest.New(t, "tokens", "rm", secondTokenID)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
// Validate that token was expired
|
||||
if token, err := client.APIKeyByName(ctx, secondUser.ID.String(), "token-two"); assert.NoError(t, err) {
|
||||
require.True(t, token.ExpiresAt.Before(time.Now()))
|
||||
}
|
||||
|
||||
// Delete by ID (explicit delete flag)
|
||||
inv, root = clitest.New(t, "tokens", "rm", "--delete", secondTokenID)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
@@ -177,8 +202,8 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
|
||||
// Delete scoped token by ID
|
||||
inv, root = clitest.New(t, "tokens", "rm", scopedTokenID)
|
||||
// Delete scoped token by ID (explicit delete flag)
|
||||
inv, root = clitest.New(t, "tokens", "rm", "--delete", scopedTokenID)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
@@ -199,8 +224,8 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
fourthToken := res
|
||||
|
||||
// Delete by token
|
||||
inv, root = clitest.New(t, "tokens", "rm", fourthToken)
|
||||
// Delete by token (explicit delete flag)
|
||||
inv, root = clitest.New(t, "tokens", "rm", "--delete", fourthToken)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
@@ -210,3 +235,114 @@ func TestTokens(t *testing.T) {
|
||||
require.NotEmpty(t, res)
|
||||
require.Contains(t, res, "deleted")
|
||||
}
|
||||
|
||||
func TestTokensListExpiredFiltering(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, _, api := coderdtest.NewWithAPI(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a valid (non-expired) token
|
||||
validToken, _ := dbgen.APIKey(t, api.Database, database.APIKey{
|
||||
UserID: owner.UserID,
|
||||
ExpiresAt: time.Now().Add(24 * time.Hour),
|
||||
LoginType: database.LoginTypeToken,
|
||||
TokenName: "valid-token",
|
||||
})
|
||||
|
||||
// Create an expired token
|
||||
expiredToken, _ := dbgen.APIKey(t, api.Database, database.APIKey{
|
||||
UserID: owner.UserID,
|
||||
ExpiresAt: time.Now().Add(-24 * time.Hour),
|
||||
LoginType: database.LoginTypeToken,
|
||||
TokenName: "expired-token",
|
||||
})
|
||||
|
||||
t.Run("HidesExpiredByDefault", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
inv, root := clitest.New(t, "tokens", "ls")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
res := buf.String()
|
||||
require.Contains(t, res, validToken.ID)
|
||||
require.Contains(t, res, "valid-token")
|
||||
require.NotContains(t, res, expiredToken.ID)
|
||||
require.NotContains(t, res, "expired-token")
|
||||
})
|
||||
|
||||
t.Run("ShowsExpiredWithFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
inv, root := clitest.New(t, "tokens", "ls", "--include-expired")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
res := buf.String()
|
||||
require.Contains(t, res, validToken.ID)
|
||||
require.Contains(t, res, "valid-token")
|
||||
require.Contains(t, res, expiredToken.ID)
|
||||
require.Contains(t, res, "expired-token")
|
||||
})
|
||||
|
||||
t.Run("JSONOutputRespectsFilter", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
// Default (no expired)
|
||||
inv, root := clitest.New(t, "tokens", "ls", "--output=json")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
res := buf.String()
|
||||
require.Contains(t, res, "valid-token")
|
||||
require.NotContains(t, res, "expired-token")
|
||||
|
||||
// With --include-expired
|
||||
inv, root = clitest.New(t, "tokens", "ls", "--output=json", "--include-expired")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf = new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
res = buf.String()
|
||||
require.Contains(t, res, "valid-token")
|
||||
require.Contains(t, res, "expired-token")
|
||||
})
|
||||
|
||||
t.Run("AllUsersWithIncludeExpired", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
inv, root := clitest.New(t, "tokens", "ls", "--all", "--include-expired")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
res := buf.String()
|
||||
// Should show both valid and expired tokens
|
||||
require.Contains(t, res, validToken.ID)
|
||||
require.Contains(t, res, "valid-token")
|
||||
require.Contains(t, res, expiredToken.ID)
|
||||
require.Contains(t, res, "expired-token")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -990,4 +990,74 @@ func TestUpdateValidateRichParameters(t *testing.T) {
|
||||
|
||||
_ = testutil.TryReceive(ctx, t, doneChan)
|
||||
})
|
||||
|
||||
t.Run("NewImmutableParameterViaFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create template and workspace with only a mutable parameter.
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
templateParameters := []*proto.RichParameter{
|
||||
{Name: stringParameterName, Type: "string", Mutable: true, Required: true, Options: []*proto.RichParameterOption{
|
||||
{Name: "First option", Description: "This is first option", Value: "1st"},
|
||||
{Name: "Second option", Description: "This is second option", Value: "2nd"},
|
||||
}},
|
||||
}
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, prepareEchoResponses(templateParameters))
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
inv, root := clitest.New(t, "create", "my-workspace", "--yes", "--template", template.Name, "--parameter", fmt.Sprintf("%s=%s", stringParameterName, "1st"))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
err := inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update template: add a new immutable parameter.
|
||||
updatedTemplateParameters := []*proto.RichParameter{
|
||||
templateParameters[0],
|
||||
{Name: immutableParameterName, Type: "string", Mutable: false, Required: true, Options: []*proto.RichParameterOption{
|
||||
{Name: "fir", Description: "First option for immutable parameter", Value: "I"},
|
||||
{Name: "sec", Description: "Second option for immutable parameter", Value: "II"},
|
||||
}},
|
||||
}
|
||||
|
||||
updatedVersion := coderdtest.UpdateTemplateVersion(t, client, owner.OrganizationID, prepareEchoResponses(updatedTemplateParameters), template.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, updatedVersion.ID)
|
||||
err = client.UpdateActiveTemplateVersion(context.Background(), template.ID, codersdk.UpdateActiveTemplateVersion{
|
||||
ID: updatedVersion.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update workspace, supplying the new immutable parameter via
|
||||
// the --parameter flag. This should succeed because it's the
|
||||
// first time this parameter is being set.
|
||||
inv, root = clitest.New(t, "update", "my-workspace",
|
||||
"--parameter", fmt.Sprintf("%s=%s", immutableParameterName, "II"))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("Planning workspace")
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
_ = testutil.TryReceive(ctx, t, doneChan)
|
||||
|
||||
// Verify the immutable parameter was set correctly.
|
||||
workspace, err := client.WorkspaceByOwnerAndName(ctx, memberUser.ID.String(), "my-workspace", codersdk.WorkspaceOptions{})
|
||||
require.NoError(t, err)
|
||||
actualParameters, err := client.WorkspaceBuildParameters(ctx, workspace.LatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, actualParameters, codersdk.WorkspaceBuildParameter{
|
||||
Name: immutableParameterName,
|
||||
Value: "II",
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -179,6 +179,8 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
Database: opts.Database,
|
||||
Log: opts.Log,
|
||||
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
|
||||
Clock: opts.Clock,
|
||||
NotificationsEnqueuer: opts.NotificationsEnqueuer,
|
||||
}
|
||||
|
||||
api.MetadataAPI = &MetadataAPI{
|
||||
|
||||
@@ -2,6 +2,10 @@ package agentapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
@@ -9,7 +13,14 @@ import (
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
strutil "github.com/coder/coder/v2/coderd/util/strings"
|
||||
"github.com/coder/coder/v2/coderd/workspacestats"
|
||||
"github.com/coder/coder/v2/coderd/wspubsub"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
type AppsAPI struct {
|
||||
@@ -17,6 +28,8 @@ type AppsAPI struct {
|
||||
Database database.Store
|
||||
Log slog.Logger
|
||||
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent, wspubsub.WorkspaceEventKind) error
|
||||
NotificationsEnqueuer notifications.Enqueuer
|
||||
Clock quartz.Clock
|
||||
}
|
||||
|
||||
func (a *AppsAPI) BatchUpdateAppHealths(ctx context.Context, req *agentproto.BatchUpdateAppHealthRequest) (*agentproto.BatchUpdateAppHealthResponse, error) {
|
||||
@@ -104,3 +117,230 @@ func (a *AppsAPI) BatchUpdateAppHealths(ctx context.Context, req *agentproto.Bat
|
||||
}
|
||||
return &agentproto.BatchUpdateAppHealthResponse{}, nil
|
||||
}
|
||||
|
||||
func (a *AppsAPI) UpdateAppStatus(ctx context.Context, req *agentproto.UpdateAppStatusRequest) (*agentproto.UpdateAppStatusResponse, error) {
|
||||
if len(req.Message) > 160 {
|
||||
return nil, codersdk.NewError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Message is too long.",
|
||||
Detail: "Message must be less than 160 characters.",
|
||||
Validations: []codersdk.ValidationError{
|
||||
{Field: "message", Detail: "Message must be less than 160 characters."},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
var dbState database.WorkspaceAppStatusState
|
||||
switch req.State {
|
||||
case agentproto.UpdateAppStatusRequest_COMPLETE:
|
||||
dbState = database.WorkspaceAppStatusStateComplete
|
||||
case agentproto.UpdateAppStatusRequest_FAILURE:
|
||||
dbState = database.WorkspaceAppStatusStateFailure
|
||||
case agentproto.UpdateAppStatusRequest_WORKING:
|
||||
dbState = database.WorkspaceAppStatusStateWorking
|
||||
case agentproto.UpdateAppStatusRequest_IDLE:
|
||||
dbState = database.WorkspaceAppStatusStateIdle
|
||||
default:
|
||||
return nil, codersdk.NewError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid state provided.",
|
||||
Detail: fmt.Sprintf("invalid state: %q", req.State),
|
||||
Validations: []codersdk.ValidationError{
|
||||
{Field: "state", Detail: "State must be one of: complete, failure, working, idle."},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
workspaceAgent, err := a.AgentFn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
app, err := a.Database.GetWorkspaceAppByAgentIDAndSlug(ctx, database.GetWorkspaceAppByAgentIDAndSlugParams{
|
||||
AgentID: workspaceAgent.ID,
|
||||
Slug: req.Slug,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, codersdk.NewError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Failed to get workspace app.",
|
||||
Detail: fmt.Sprintf("No app found with slug %q", req.Slug),
|
||||
})
|
||||
}
|
||||
|
||||
workspace, err := a.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID)
|
||||
if err != nil {
|
||||
return nil, codersdk.NewError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Failed to get workspace.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Treat the message as untrusted input.
|
||||
cleaned := strutil.UISanitize(req.Message)
|
||||
|
||||
// Get the latest status for the workspace app to detect no-op updates
|
||||
// nolint:gocritic // This is a system restricted operation.
|
||||
latestAppStatus, err := a.Database.GetLatestWorkspaceAppStatusByAppID(dbauthz.AsSystemRestricted(ctx), app.ID)
|
||||
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
|
||||
return nil, codersdk.NewError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to get latest workspace app status.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
// If no rows found, latestAppStatus will be a zero-value struct (ID == uuid.Nil)
|
||||
|
||||
// nolint:gocritic // This is a system restricted operation.
|
||||
_, err = a.Database.InsertWorkspaceAppStatus(dbauthz.AsSystemRestricted(ctx), database.InsertWorkspaceAppStatusParams{
|
||||
ID: uuid.New(),
|
||||
CreatedAt: dbtime.Now(),
|
||||
WorkspaceID: workspace.ID,
|
||||
AgentID: workspaceAgent.ID,
|
||||
AppID: app.ID,
|
||||
State: dbState,
|
||||
Message: cleaned,
|
||||
Uri: sql.NullString{
|
||||
String: req.Uri,
|
||||
Valid: req.Uri != "",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, codersdk.NewError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to insert workspace app status.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
if a.PublishWorkspaceUpdateFn != nil {
|
||||
err = a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent, wspubsub.WorkspaceEventKindAgentAppStatusUpdate)
|
||||
if err != nil {
|
||||
return nil, codersdk.NewError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to publish workspace update.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Notify on state change to Working/Idle for AI tasks
|
||||
a.enqueueAITaskStateNotification(ctx, app.ID, latestAppStatus, dbState, workspace, workspaceAgent)
|
||||
|
||||
if shouldBump(dbState, latestAppStatus) {
|
||||
// We pass time.Time{} for nextAutostart since we don't have access to
|
||||
// TemplateScheduleStore here. The activity bump logic handles this by
|
||||
// defaulting to the template's activity_bump duration (typically 1 hour).
|
||||
workspacestats.ActivityBumpWorkspace(ctx, a.Log, a.Database, workspace.ID, time.Time{})
|
||||
}
|
||||
// just return a blank response because it doesn't contain any settable fields at present.
|
||||
return new(agentproto.UpdateAppStatusResponse), nil
|
||||
}
|
||||
|
||||
func shouldBump(dbState database.WorkspaceAppStatusState, latestAppStatus database.WorkspaceAppStatus) bool {
|
||||
// Bump deadline when agent reports working or transitions away from working.
|
||||
// This prevents auto-pause during active work and gives users time to interact
|
||||
// after work completes.
|
||||
|
||||
// Bump if reporting working state.
|
||||
if dbState == database.WorkspaceAppStatusStateWorking {
|
||||
return true
|
||||
}
|
||||
|
||||
// Bump if transitioning away from working state.
|
||||
if latestAppStatus.ID != uuid.Nil {
|
||||
prevState := latestAppStatus.State
|
||||
if prevState == database.WorkspaceAppStatusStateWorking {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// enqueueAITaskStateNotification enqueues a notification when an AI task's app
|
||||
// transitions to Working or Idle.
|
||||
// No-op if:
|
||||
// - the workspace agent app isn't configured as an AI task,
|
||||
// - the new state equals the latest persisted state,
|
||||
// - the workspace agent is not ready (still starting up).
|
||||
func (a *AppsAPI) enqueueAITaskStateNotification(
|
||||
ctx context.Context,
|
||||
appID uuid.UUID,
|
||||
latestAppStatus database.WorkspaceAppStatus,
|
||||
newAppStatus database.WorkspaceAppStatusState,
|
||||
workspace database.Workspace,
|
||||
agent database.WorkspaceAgent,
|
||||
) {
|
||||
var notificationTemplate uuid.UUID
|
||||
switch newAppStatus {
|
||||
case database.WorkspaceAppStatusStateWorking:
|
||||
notificationTemplate = notifications.TemplateTaskWorking
|
||||
case database.WorkspaceAppStatusStateIdle:
|
||||
notificationTemplate = notifications.TemplateTaskIdle
|
||||
case database.WorkspaceAppStatusStateComplete:
|
||||
notificationTemplate = notifications.TemplateTaskCompleted
|
||||
case database.WorkspaceAppStatusStateFailure:
|
||||
notificationTemplate = notifications.TemplateTaskFailed
|
||||
default:
|
||||
// Not a notifiable state, do nothing
|
||||
return
|
||||
}
|
||||
|
||||
if !workspace.TaskID.Valid {
|
||||
// Workspace has no task ID, do nothing.
|
||||
return
|
||||
}
|
||||
|
||||
// Only send notifications when the agent is ready. We want to skip
|
||||
// any state transitions that occur whilst the workspace is starting
|
||||
// up as it doesn't make sense to receive them.
|
||||
if agent.LifecycleState != database.WorkspaceAgentLifecycleStateReady {
|
||||
a.Log.Debug(ctx, "skipping AI task notification because agent is not ready",
|
||||
slog.F("agent_id", agent.ID),
|
||||
slog.F("lifecycle_state", agent.LifecycleState),
|
||||
slog.F("new_app_status", newAppStatus),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
task, err := a.Database.GetTaskByID(ctx, workspace.TaskID.UUID)
|
||||
if err != nil {
|
||||
a.Log.Warn(ctx, "failed to get task", slog.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if !task.WorkspaceAppID.Valid || task.WorkspaceAppID.UUID != appID {
|
||||
// Non-task app, do nothing.
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if the latest persisted state equals the new state (no new transition)
|
||||
// Note: uuid.Nil check is valid here. If no previous status exists,
|
||||
// GetLatestWorkspaceAppStatusByAppID returns sql.ErrNoRows and we get a zero-value struct.
|
||||
if latestAppStatus.ID != uuid.Nil && latestAppStatus.State == newAppStatus {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip the initial "Working" notification when the task first starts.
|
||||
// This is obvious to the user since they just created the task.
|
||||
// We still notify on the first "Idle" status and all subsequent transitions.
|
||||
if latestAppStatus.ID == uuid.Nil && newAppStatus == database.WorkspaceAppStatusStateWorking {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := a.NotificationsEnqueuer.EnqueueWithData(
|
||||
// nolint:gocritic // Need notifier actor to enqueue notifications
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notificationTemplate,
|
||||
map[string]string{
|
||||
"task": task.Name,
|
||||
"workspace": workspace.Name,
|
||||
},
|
||||
map[string]any{
|
||||
// Use a 1-minute bucketed timestamp to bypass per-day dedupe,
|
||||
// allowing identical content to resend within the same day
|
||||
// (but not more than once every 10s).
|
||||
"dedupe_bypass_ts": a.Clock.Now().UTC().Truncate(time.Minute),
|
||||
},
|
||||
"api-workspace-agent-app-status",
|
||||
// Associate this notification with related entities
|
||||
workspace.ID, workspace.OwnerID, workspace.OrganizationID, appID,
|
||||
); err != nil {
|
||||
a.Log.Warn(ctx, "failed to notify of task state", slog.Error(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,115 @@
|
||||
package agentapi
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
)
|
||||
|
||||
func TestShouldBump(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
prevState *database.WorkspaceAppStatusState // nil means no previous state
|
||||
newState database.WorkspaceAppStatusState
|
||||
shouldBump bool
|
||||
}{
|
||||
{
|
||||
name: "FirstStatusBumps",
|
||||
prevState: nil,
|
||||
newState: database.WorkspaceAppStatusStateWorking,
|
||||
shouldBump: true,
|
||||
},
|
||||
{
|
||||
name: "WorkingToIdleBumps",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateWorking),
|
||||
newState: database.WorkspaceAppStatusStateIdle,
|
||||
shouldBump: true,
|
||||
},
|
||||
{
|
||||
name: "WorkingToCompleteBumps",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateWorking),
|
||||
newState: database.WorkspaceAppStatusStateComplete,
|
||||
shouldBump: true,
|
||||
},
|
||||
{
|
||||
name: "CompleteToIdleNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateComplete),
|
||||
newState: database.WorkspaceAppStatusStateIdle,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "CompleteToCompleteNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateComplete),
|
||||
newState: database.WorkspaceAppStatusStateComplete,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "FailureToIdleNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateFailure),
|
||||
newState: database.WorkspaceAppStatusStateIdle,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "FailureToFailureNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateFailure),
|
||||
newState: database.WorkspaceAppStatusStateFailure,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "CompleteToWorkingBumps",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateComplete),
|
||||
newState: database.WorkspaceAppStatusStateWorking,
|
||||
shouldBump: true,
|
||||
},
|
||||
{
|
||||
name: "FailureToCompleteNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateFailure),
|
||||
newState: database.WorkspaceAppStatusStateComplete,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "WorkingToFailureBumps",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateWorking),
|
||||
newState: database.WorkspaceAppStatusStateFailure,
|
||||
shouldBump: true,
|
||||
},
|
||||
{
|
||||
name: "IdleToIdleNoBump",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateIdle),
|
||||
newState: database.WorkspaceAppStatusStateIdle,
|
||||
shouldBump: false,
|
||||
},
|
||||
{
|
||||
name: "IdleToWorkingBumps",
|
||||
prevState: ptr.Ref(database.WorkspaceAppStatusStateIdle),
|
||||
newState: database.WorkspaceAppStatusStateWorking,
|
||||
shouldBump: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var prevAppStatus database.WorkspaceAppStatus
|
||||
// If there's a previous state, report it first.
|
||||
if tt.prevState != nil {
|
||||
prevAppStatus.ID = uuid.UUID{1}
|
||||
prevAppStatus.State = *tt.prevState
|
||||
}
|
||||
|
||||
didBump := shouldBump(tt.newState, prevAppStatus)
|
||||
if tt.shouldBump {
|
||||
require.True(t, didBump, "wanted deadline to bump but it didn't")
|
||||
} else {
|
||||
require.False(t, didBump, "wanted deadline not to bump but it did")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,9 +2,13 @@ package agentapi_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
@@ -12,8 +16,12 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/notifications/notificationstest"
|
||||
"github.com/coder/coder/v2/coderd/wspubsub"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
func TestBatchUpdateAppHealths(t *testing.T) {
|
||||
@@ -253,3 +261,183 @@ func TestBatchUpdateAppHealths(t *testing.T) {
|
||||
require.Nil(t, resp)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWorkspaceAgentAppStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctrl := gomock.NewController(t)
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
fEnq := ¬ificationstest.FakeEnqueuer{}
|
||||
mClock := quartz.NewMock(t)
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: uuid.UUID{2},
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
}
|
||||
workspaceUpdates := make(chan wspubsub.WorkspaceEventKind, 100)
|
||||
|
||||
api := &agentapi.AppsAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Database: mDB,
|
||||
Log: testutil.Logger(t),
|
||||
PublishWorkspaceUpdateFn: func(_ context.Context, agnt *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
assert.Equal(t, *agnt, agent)
|
||||
testutil.AssertSend(ctx, t, workspaceUpdates, kind)
|
||||
return nil
|
||||
},
|
||||
NotificationsEnqueuer: fEnq,
|
||||
Clock: mClock,
|
||||
}
|
||||
|
||||
app := database.WorkspaceApp{
|
||||
ID: uuid.UUID{8},
|
||||
}
|
||||
mDB.EXPECT().GetWorkspaceAppByAgentIDAndSlug(gomock.Any(), database.GetWorkspaceAppByAgentIDAndSlugParams{
|
||||
AgentID: agent.ID,
|
||||
Slug: "vscode",
|
||||
}).Times(1).Return(app, nil)
|
||||
task := database.Task{
|
||||
ID: uuid.UUID{7},
|
||||
WorkspaceAppID: uuid.NullUUID{
|
||||
Valid: true,
|
||||
UUID: app.ID,
|
||||
},
|
||||
}
|
||||
mDB.EXPECT().GetTaskByID(gomock.Any(), task.ID).Times(1).Return(task, nil)
|
||||
workspace := database.Workspace{
|
||||
ID: uuid.UUID{9},
|
||||
TaskID: uuid.NullUUID{
|
||||
Valid: true,
|
||||
UUID: task.ID,
|
||||
},
|
||||
}
|
||||
mDB.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Times(1).Return(workspace, nil)
|
||||
appStatus := database.WorkspaceAppStatus{
|
||||
ID: uuid.UUID{6},
|
||||
}
|
||||
mDB.EXPECT().GetLatestWorkspaceAppStatusByAppID(gomock.Any(), app.ID).Times(1).Return(appStatus, nil)
|
||||
mDB.EXPECT().InsertWorkspaceAppStatus(
|
||||
gomock.Any(),
|
||||
gomock.Cond(func(params database.InsertWorkspaceAppStatusParams) bool {
|
||||
if params.AgentID == agent.ID && params.AppID == app.ID {
|
||||
assert.Equal(t, "testing", params.Message)
|
||||
assert.Equal(t, database.WorkspaceAppStatusStateComplete, params.State)
|
||||
assert.True(t, params.Uri.Valid)
|
||||
assert.Equal(t, "https://example.com", params.Uri.String)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})).Times(1).Return(database.WorkspaceAppStatus{}, nil)
|
||||
|
||||
_, err := api.UpdateAppStatus(ctx, &agentproto.UpdateAppStatusRequest{
|
||||
Slug: "vscode",
|
||||
Message: "testing",
|
||||
Uri: "https://example.com",
|
||||
State: agentproto.UpdateAppStatusRequest_COMPLETE,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
kind := testutil.RequireReceive(ctx, t, workspaceUpdates)
|
||||
require.Equal(t, wspubsub.WorkspaceEventKindAgentAppStatusUpdate, kind)
|
||||
sent := fEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTaskCompleted))
|
||||
require.Len(t, sent, 1)
|
||||
})
|
||||
|
||||
t.Run("FailUnknownApp", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctrl := gomock.NewController(t)
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: uuid.UUID{2},
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
}
|
||||
|
||||
mDB.EXPECT().GetWorkspaceAppByAgentIDAndSlug(gomock.Any(), gomock.Any()).
|
||||
Times(1).
|
||||
Return(database.WorkspaceApp{}, sql.ErrNoRows)
|
||||
|
||||
api := &agentapi.AppsAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Database: mDB,
|
||||
Log: testutil.Logger(t),
|
||||
}
|
||||
_, err := api.UpdateAppStatus(ctx, &agentproto.UpdateAppStatusRequest{
|
||||
Slug: "unknown",
|
||||
Message: "testing",
|
||||
Uri: "https://example.com",
|
||||
State: agentproto.UpdateAppStatusRequest_COMPLETE,
|
||||
})
|
||||
require.ErrorContains(t, err, "No app found with slug")
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("FailUnknownState", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctrl := gomock.NewController(t)
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: uuid.UUID{2},
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
}
|
||||
|
||||
api := &agentapi.AppsAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Database: mDB,
|
||||
Log: testutil.Logger(t),
|
||||
}
|
||||
|
||||
_, err := api.UpdateAppStatus(ctx, &agentproto.UpdateAppStatusRequest{
|
||||
Slug: "vscode",
|
||||
Message: "testing",
|
||||
Uri: "https://example.com",
|
||||
State: 77,
|
||||
})
|
||||
require.ErrorContains(t, err, "Invalid state")
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("FailTooLong", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctrl := gomock.NewController(t)
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: uuid.UUID{2},
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
}
|
||||
|
||||
api := &agentapi.AppsAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Database: mDB,
|
||||
Log: testutil.Logger(t),
|
||||
}
|
||||
|
||||
_, err := api.UpdateAppStatus(ctx, &agentproto.UpdateAppStatusRequest{
|
||||
Slug: "vscode",
|
||||
Message: strings.Repeat("a", 161),
|
||||
Uri: "https://example.com",
|
||||
State: agentproto.UpdateAppStatusRequest_COMPLETE,
|
||||
})
|
||||
require.ErrorContains(t, err, "Message is too long")
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -128,7 +128,7 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
|
||||
Name: agentName,
|
||||
ResourceID: parentAgent.ResourceID,
|
||||
AuthToken: uuid.New(),
|
||||
AuthInstanceID: parentAgent.AuthInstanceID,
|
||||
AuthInstanceID: sql.NullString{},
|
||||
Architecture: req.Architecture,
|
||||
EnvironmentVariables: pqtype.NullRawMessage{},
|
||||
OperatingSystem: req.OperatingSystem,
|
||||
|
||||
@@ -175,6 +175,52 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Context: https://github.com/coder/coder/pull/22196
|
||||
t.Run("CreateSubAgentDoesNotInheritAuthInstanceID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
log = testutil.Logger(t)
|
||||
clock = quartz.NewMock(t)
|
||||
|
||||
db, org = newDatabaseWithOrg(t)
|
||||
user, agent = newUserWithWorkspaceAgent(t, db, org)
|
||||
)
|
||||
|
||||
// Given: The parent agent has an AuthInstanceID set
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
parentAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agent.ID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, parentAgent.AuthInstanceID.Valid, "parent agent should have an AuthInstanceID")
|
||||
require.NotEmpty(t, parentAgent.AuthInstanceID.String)
|
||||
|
||||
api := newAgentAPI(t, log, db, clock, user, org, agent)
|
||||
|
||||
// When: We create a sub agent
|
||||
createResp, err := api.CreateSubAgent(ctx, &proto.CreateSubAgentRequest{
|
||||
Name: "sub-agent",
|
||||
Directory: "/workspaces/test",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
subAgentID, err := uuid.FromBytes(createResp.Agent.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: The sub-agent must NOT re-use the parent's AuthInstanceID.
|
||||
subAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), subAgentID)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, subAgent.AuthInstanceID.Valid, "sub-agent should not have an AuthInstanceID")
|
||||
assert.Empty(t, subAgent.AuthInstanceID.String, "sub-agent AuthInstanceID string should be empty")
|
||||
|
||||
// Double-check: looking up by the parent's instance ID must
|
||||
// still return the parent, not the sub-agent.
|
||||
lookedUp, err := db.GetWorkspaceAgentByInstanceID(dbauthz.AsSystemRestricted(ctx), parentAgent.AuthInstanceID.String)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, parentAgent.ID, lookedUp.ID, "instance ID lookup should still return the parent agent")
|
||||
})
|
||||
|
||||
type expectedAppError struct {
|
||||
index int32
|
||||
field string
|
||||
@@ -1320,7 +1366,6 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
+37
-3
@@ -21,10 +21,12 @@ import (
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/httpapi/httperror"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/searchquery"
|
||||
@@ -190,7 +192,8 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
defer commitAuditWS()
|
||||
|
||||
workspace, err := createWorkspace(ctx, aReqWS, apiKey.UserID, api, owner, createReq, r, &createWorkspaceOptions{
|
||||
workspace, err := createWorkspace(ctx, aReqWS, apiKey.UserID, api, owner, createReq, &createWorkspaceOptions{
|
||||
remoteAddr: r.RemoteAddr,
|
||||
// Before creating the workspace, ensure that this task can be created.
|
||||
preCreateInTX: func(ctx context.Context, tx database.Store) error {
|
||||
// Create task record in the database before creating the workspace so that
|
||||
@@ -464,7 +467,6 @@ func (api *API) convertTasks(ctx context.Context, requesterID uuid.UUID, dbTasks
|
||||
|
||||
apiWorkspaces, err := convertWorkspaces(
|
||||
ctx,
|
||||
api.Experiments,
|
||||
api.Logger,
|
||||
requesterID,
|
||||
workspaces,
|
||||
@@ -544,7 +546,6 @@ func (api *API) taskGet(rw http.ResponseWriter, r *http.Request) {
|
||||
|
||||
ws, err := convertWorkspace(
|
||||
ctx,
|
||||
api.Experiments,
|
||||
api.Logger,
|
||||
apiKey.UserID,
|
||||
workspace,
|
||||
@@ -1300,6 +1301,23 @@ func (api *API) pauseTask(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := api.NotificationsEnqueuer.Enqueue(
|
||||
// nolint:gocritic // Need notifier actor to enqueue notifications.
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notifications.TemplateTaskPaused,
|
||||
map[string]string{
|
||||
"task": task.Name,
|
||||
"task_id": task.ID.String(),
|
||||
"workspace": workspace.Name,
|
||||
"pause_reason": "manual",
|
||||
},
|
||||
"api-task-pause",
|
||||
workspace.ID, workspace.OwnerID, workspace.OrganizationID,
|
||||
); err != nil {
|
||||
api.Logger.Warn(ctx, "failed to notify of task paused", slog.Error(err), slog.F("task_id", task.ID), slog.F("workspace_id", workspace.ID))
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.PauseTaskResponse{
|
||||
WorkspaceBuild: &build,
|
||||
})
|
||||
@@ -1387,6 +1405,22 @@ func (api *API) resumeTask(rw http.ResponseWriter, r *http.Request) {
|
||||
httperror.WriteWorkspaceBuildError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
if _, err := api.NotificationsEnqueuer.Enqueue(
|
||||
// nolint:gocritic // Need notifier actor to enqueue notifications.
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notifications.TemplateTaskResumed,
|
||||
map[string]string{
|
||||
"task": task.Name,
|
||||
"task_id": task.ID.String(),
|
||||
"workspace": workspace.Name,
|
||||
},
|
||||
"api-task-resume",
|
||||
workspace.ID, workspace.OwnerID, workspace.OrganizationID,
|
||||
); err != nil {
|
||||
api.Logger.Warn(ctx, "failed to notify of task resumed", slog.Error(err), slog.F("task_id", task.ID), slog.F("workspace_id", workspace.ID))
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.ResumeTaskResponse{
|
||||
WorkspaceBuild: &build,
|
||||
})
|
||||
|
||||
+111
-39
@@ -45,10 +45,10 @@ import (
|
||||
)
|
||||
|
||||
// createTaskInState is a helper to create a task in the desired state.
|
||||
// It returns a function that takes context, test, and status, and returns the task ID.
|
||||
// It returns a function that takes context, test, and status, and returns the task.
|
||||
// The caller is responsible for setting up the database, owner, and user.
|
||||
func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID, userID uuid.UUID) func(context.Context, *testing.T, database.TaskStatus) uuid.UUID {
|
||||
return func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID, userID uuid.UUID) func(context.Context, *testing.T, database.TaskStatus) database.Task {
|
||||
return func(ctx context.Context, t *testing.T, status database.TaskStatus) database.Task {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
@@ -65,6 +65,9 @@ func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID,
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusActive:
|
||||
// Default builder produces a succeeded start build.
|
||||
// Post-processing below sets agent and app to active.
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
@@ -76,31 +79,32 @@ func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID,
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
if status == database.TaskStatusActive || status == database.TaskStatusError {
|
||||
// Set agent to ready state so agent_status returns 'active'.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
appHealth := database.WorkspaceAppHealthHealthy
|
||||
if status == database.TaskStatusError {
|
||||
appHealth = database.WorkspaceAppHealthUnhealthy
|
||||
}
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
Health: appHealth,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
return resp.Task
|
||||
}
|
||||
}
|
||||
|
||||
@@ -828,7 +832,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Run("SendToNonActiveStates", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
@@ -845,9 +849,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
task := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
err := client.TaskSend(ctx, "me", task.ID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
@@ -863,9 +867,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
task := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
err := client.TaskSend(ctx, "me", task.ID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
@@ -881,9 +885,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
task := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
err := client.TaskSend(ctx, "me", task.ID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
@@ -899,9 +903,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
task := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
err := client.TaskSend(ctx, "me", task.ID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
@@ -1120,16 +1124,16 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
task := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
logsResp, err := client.TaskLogs(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
@@ -1138,16 +1142,16 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
task := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
logsResp, err := client.TaskLogs(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
@@ -1156,16 +1160,16 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
task := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
logsResp, err := client.TaskLogs(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
@@ -1174,9 +1178,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
task := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
logsResp, err := client.TaskLogs(ctx, "me", task.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, logsResp.Snapshot)
|
||||
@@ -1188,7 +1192,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
task := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
invalidEnvelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Format: "unknown-format",
|
||||
@@ -1198,13 +1202,13 @@ func TestTasks(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(invalidJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = client.TaskLogs(ctx, "me", taskID)
|
||||
_, err = client.TaskLogs(ctx, "me", task.ID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
@@ -1217,16 +1221,16 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
task := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(`{"format":"agentapi","data":"not an object"}`),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = client.TaskLogs(ctx, "me", taskID)
|
||||
_, err = client.TaskLogs(ctx, "me", task.ID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
@@ -1238,9 +1242,9 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
task := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
_, err := client.TaskLogs(ctx, "me", taskID)
|
||||
_, err := client.TaskLogs(ctx, "me", task.ID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
@@ -2563,7 +2567,6 @@ func TestPauseTask(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
task, _ := setupWorkspaceTask(t, db, owner)
|
||||
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, tc.roles...)
|
||||
@@ -2787,6 +2790,41 @@ func TestPauseTask(t *testing.T) {
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Notification", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
notifyEnq = ¬ificationstest.FakeEnqueuer{}
|
||||
ownerClient, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{NotificationsEnqueuer: notifyEnq})
|
||||
owner = coderdtest.CreateFirstUser(t, ownerClient)
|
||||
)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
ownerUser, err := ownerClient.User(ctx, owner.UserID.String())
|
||||
require.NoError(t, err)
|
||||
|
||||
createTask := createTaskInState(db, coderdtest.AuthzUserSubject(ownerUser), owner.OrganizationID, owner.UserID)
|
||||
|
||||
// Given: A task in an active state
|
||||
task := createTask(ctx, t, database.TaskStatusActive)
|
||||
|
||||
workspace, err := ownerClient.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: We pause the task
|
||||
_, err = ownerClient.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: A notification should be sent
|
||||
sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTaskPaused))
|
||||
require.Len(t, sent, 1)
|
||||
require.Equal(t, owner.UserID, sent[0].UserID)
|
||||
require.Equal(t, task.Name, sent[0].Labels["task"])
|
||||
require.Equal(t, task.ID.String(), sent[0].Labels["task_id"])
|
||||
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
|
||||
require.Equal(t, "manual", sent[0].Labels["pause_reason"])
|
||||
})
|
||||
}
|
||||
|
||||
func TestResumeTask(t *testing.T) {
|
||||
@@ -3116,4 +3154,38 @@ func TestResumeTask(t *testing.T) {
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Notification", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
notifyEnq = ¬ificationstest.FakeEnqueuer{}
|
||||
ownerClient, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{NotificationsEnqueuer: notifyEnq})
|
||||
owner = coderdtest.CreateFirstUser(t, ownerClient)
|
||||
)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
ownerUser, err := ownerClient.User(ctx, owner.UserID.String())
|
||||
require.NoError(t, err)
|
||||
|
||||
createTask := createTaskInState(db, coderdtest.AuthzUserSubject(ownerUser), owner.OrganizationID, owner.UserID)
|
||||
|
||||
// Given: A task in a paused state
|
||||
task := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
workspace, err := ownerClient.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: We resume the task
|
||||
_, err = ownerClient.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: A notification should be sent
|
||||
sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTaskResumed))
|
||||
require.Len(t, sent, 1)
|
||||
require.Equal(t, owner.UserID, sent[0].UserID)
|
||||
require.Equal(t, task.Name, sent[0].Labels["task"])
|
||||
require.Equal(t, task.ID.String(), sent[0].Labels["task_id"])
|
||||
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
|
||||
})
|
||||
}
|
||||
|
||||
Generated
+295
-14
@@ -135,6 +135,34 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/aibridge/models": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"AI Bridge"
|
||||
],
|
||||
"summary": "List AI Bridge models",
|
||||
"operationId": "list-ai-bridge-models",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/appearance": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -453,6 +481,99 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/config": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Get chat config settings",
|
||||
"operationId": "get-chat-config-settings",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Update chat config settings",
|
||||
"operationId": "update-chat-config-settings",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Chat config settings request",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
},
|
||||
"304": {
|
||||
"description": "Not Modified"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/archive": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"Chats"
|
||||
],
|
||||
"summary": "Archive a chat",
|
||||
"operationId": "archive-chat",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/unarchive": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"Chats"
|
||||
],
|
||||
"summary": "Unarchive a chat",
|
||||
"operationId": "unarchive-chat",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/connectionlog": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -3745,6 +3866,69 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/members/{user}/workspaces/available-users": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Workspaces"
|
||||
],
|
||||
"summary": "Get users available for workspace creation",
|
||||
"operationId": "get-users-available-for-workspace-creation",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Organization ID",
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Search query",
|
||||
"name": "q",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Limit results",
|
||||
"name": "limit",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Offset for pagination",
|
||||
"name": "offset",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.MinimalUser"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/paginated-members": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -8175,6 +8359,12 @@ const docTemplate = `{
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"description": "Include expired tokens in the list",
|
||||
"name": "include_expired",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -8386,6 +8576,54 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/keys/{keyid}/expire": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Users"
|
||||
],
|
||||
"summary": "Expire API key",
|
||||
"operationId": "expire-api-key",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "string",
|
||||
"description": "Key ID",
|
||||
"name": "keyid",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Response"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Response"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/login-type": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -9434,6 +9672,7 @@ const docTemplate = `{
|
||||
],
|
||||
"summary": "Patch workspace agent app status",
|
||||
"operationId": "patch-workspace-agent-app-status",
|
||||
"deprecated": true,
|
||||
"parameters": [
|
||||
{
|
||||
"description": "app status",
|
||||
@@ -12303,6 +12542,9 @@ const docTemplate = `{
|
||||
"api_key_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"client": {
|
||||
"type": "string"
|
||||
},
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -12634,6 +12876,11 @@ const docTemplate = `{
|
||||
"boundary_usage:delete",
|
||||
"boundary_usage:read",
|
||||
"boundary_usage:update",
|
||||
"chat:*",
|
||||
"chat:create",
|
||||
"chat:delete",
|
||||
"chat:read",
|
||||
"chat:update",
|
||||
"coder:all",
|
||||
"coder:apikeys.manage_self",
|
||||
"coder:application_connect",
|
||||
@@ -12838,6 +13085,11 @@ const docTemplate = `{
|
||||
"APIKeyScopeBoundaryUsageDelete",
|
||||
"APIKeyScopeBoundaryUsageRead",
|
||||
"APIKeyScopeBoundaryUsageUpdate",
|
||||
"APIKeyScopeChatAll",
|
||||
"APIKeyScopeChatCreate",
|
||||
"APIKeyScopeChatDelete",
|
||||
"APIKeyScopeChatRead",
|
||||
"APIKeyScopeChatUpdate",
|
||||
"APIKeyScopeCoderAll",
|
||||
"APIKeyScopeCoderApikeysManageSelf",
|
||||
"APIKeyScopeCoderApplicationConnect",
|
||||
@@ -13502,7 +13754,10 @@ const docTemplate = `{
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_auto_pause",
|
||||
"task_manual_pause",
|
||||
"task_resume"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"BuildReasonInitiator",
|
||||
@@ -13513,7 +13768,10 @@ const docTemplate = `{
|
||||
"BuildReasonCLI",
|
||||
"BuildReasonSSHConnection",
|
||||
"BuildReasonVSCodeConnection",
|
||||
"BuildReasonJetbrainsConnection"
|
||||
"BuildReasonJetbrainsConnection",
|
||||
"BuildReasonTaskAutoPause",
|
||||
"BuildReasonTaskManualPause",
|
||||
"BuildReasonTaskResume"
|
||||
]
|
||||
},
|
||||
"codersdk.CORSBehavior": {
|
||||
@@ -13547,6 +13805,15 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ChatConfigSettings": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"system_prompt": {
|
||||
"description": "SystemPrompt is the deployment-wide system prompt prepended to all\nnew chat conversations. When empty, the built-in default is used.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ConnectionLatency": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -14693,6 +14960,9 @@ const docTemplate = `{
|
||||
"external_auth": {
|
||||
"$ref": "#/definitions/serpent.Struct-array_codersdk_ExternalAuthConfig"
|
||||
},
|
||||
"external_auth_github_default_provider_enable": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"external_token_encryption_keys": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -14977,17 +15247,17 @@ const docTemplate = `{
|
||||
"workspace-usage",
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing"
|
||||
"agents",
|
||||
"mcp-server-http"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAgents": "Enables agent-powered chat functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
"ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.",
|
||||
"ExperimentOAuth2": "Enables OAuth2 provider functionality.",
|
||||
"ExperimentWebPush": "Enables web push notifications through the browser.",
|
||||
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-descriptions": [
|
||||
@@ -14997,8 +15267,8 @@ const docTemplate = `{
|
||||
"Enables the new workspace usage tracking.",
|
||||
"Enables web push notifications through the browser.",
|
||||
"Enables OAuth2 provider functionality.",
|
||||
"Enables the MCP HTTP server functionality.",
|
||||
"Enables updating workspace ACLs for sharing with users and groups."
|
||||
"Enables agent-powered chat functionality.",
|
||||
"Enables the MCP HTTP server functionality."
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"ExperimentExample",
|
||||
@@ -15007,8 +15277,8 @@ const docTemplate = `{
|
||||
"ExperimentWorkspaceUsage",
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing"
|
||||
"ExperimentAgents",
|
||||
"ExperimentMCPServerHTTP"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -15248,10 +15518,6 @@ const docTemplate = `{
|
||||
"limit": {
|
||||
"type": "integer"
|
||||
},
|
||||
"soft_limit": {
|
||||
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
|
||||
"type": "integer"
|
||||
},
|
||||
"usage_period": {
|
||||
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
|
||||
"allOf": [
|
||||
@@ -15455,6 +15721,9 @@ const docTemplate = `{
|
||||
"codersdk.HTTPCookieConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"host_prefix": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"same_site": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -16665,6 +16934,14 @@ const docTemplate = `{
|
||||
"organization_mapping": {
|
||||
"type": "object"
|
||||
},
|
||||
"redirect_url": {
|
||||
"description": "RedirectURL is optional, defaulting to 'ACCESS_URL'. Only useful in niche\nsituations where the OIDC callback domain is different from the ACCESS_URL\ndomain.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/serpent.URL"
|
||||
}
|
||||
]
|
||||
},
|
||||
"scopes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -17941,6 +18218,7 @@ const docTemplate = `{
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"boundary_usage",
|
||||
"chat",
|
||||
"connection_log",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
@@ -17986,6 +18264,7 @@ const docTemplate = `{
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceBoundaryUsage",
|
||||
"ResourceChat",
|
||||
"ResourceConnectionLog",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
@@ -18216,6 +18495,7 @@ const docTemplate = `{
|
||||
"health_settings",
|
||||
"notifications_settings",
|
||||
"prebuilds_settings",
|
||||
"chat_config_settings",
|
||||
"workspace_proxy",
|
||||
"organization",
|
||||
"oauth2_provider_app",
|
||||
@@ -18244,6 +18524,7 @@ const docTemplate = `{
|
||||
"ResourceTypeHealthSettings",
|
||||
"ResourceTypeNotificationsSettings",
|
||||
"ResourceTypePrebuildsSettings",
|
||||
"ResourceTypeChatConfigSettings",
|
||||
"ResourceTypeWorkspaceProxy",
|
||||
"ResourceTypeOrganization",
|
||||
"ResourceTypeOAuth2ProviderApp",
|
||||
@@ -22748,7 +23029,7 @@ const docTemplate = `{
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"description": "Default is parsed into Value if set.",
|
||||
"description": "Default is parsed into Value if set.\nMust be ` + "`" + `\"\"` + "`" + ` if ` + "`" + `DefaultFn` + "`" + ` != nil",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
|
||||
Generated
+271
-14
@@ -112,6 +112,30 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/aibridge/models": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["AI Bridge"],
|
||||
"summary": "List AI Bridge models",
|
||||
"operationId": "list-ai-bridge-models",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/appearance": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -386,6 +410,85 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/config": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Get chat config settings",
|
||||
"operationId": "get-chat-config-settings",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Update chat config settings",
|
||||
"operationId": "update-chat-config-settings",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Chat config settings request",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ChatConfigSettings"
|
||||
}
|
||||
},
|
||||
"304": {
|
||||
"description": "Not Modified"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/archive": {
|
||||
"post": {
|
||||
"tags": ["Chats"],
|
||||
"summary": "Archive a chat",
|
||||
"operationId": "archive-chat",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/unarchive": {
|
||||
"post": {
|
||||
"tags": ["Chats"],
|
||||
"summary": "Unarchive a chat",
|
||||
"operationId": "unarchive-chat",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/connectionlog": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -3296,6 +3399,65 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/members/{user}/workspaces/available-users": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Workspaces"],
|
||||
"summary": "Get users available for workspace creation",
|
||||
"operationId": "get-users-available-for-workspace-creation",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Organization ID",
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Search query",
|
||||
"name": "q",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Limit results",
|
||||
"name": "limit",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Offset for pagination",
|
||||
"name": "offset",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.MinimalUser"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/paginated-members": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -7226,6 +7388,12 @@
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"description": "Include expired tokens in the list",
|
||||
"name": "include_expired",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -7417,6 +7585,52 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/keys/{keyid}/expire": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"tags": ["Users"],
|
||||
"summary": "Expire API key",
|
||||
"operationId": "expire-api-key",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "string",
|
||||
"description": "Key ID",
|
||||
"name": "keyid",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Response"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Response"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/login-type": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -8339,6 +8553,7 @@
|
||||
"tags": ["Agents"],
|
||||
"summary": "Patch workspace agent app status",
|
||||
"operationId": "patch-workspace-agent-app-status",
|
||||
"deprecated": true,
|
||||
"parameters": [
|
||||
{
|
||||
"description": "app status",
|
||||
@@ -10925,6 +11140,9 @@
|
||||
"api_key_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"client": {
|
||||
"type": "string"
|
||||
},
|
||||
"ended_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
@@ -11248,6 +11466,11 @@
|
||||
"boundary_usage:delete",
|
||||
"boundary_usage:read",
|
||||
"boundary_usage:update",
|
||||
"chat:*",
|
||||
"chat:create",
|
||||
"chat:delete",
|
||||
"chat:read",
|
||||
"chat:update",
|
||||
"coder:all",
|
||||
"coder:apikeys.manage_self",
|
||||
"coder:application_connect",
|
||||
@@ -11452,6 +11675,11 @@
|
||||
"APIKeyScopeBoundaryUsageDelete",
|
||||
"APIKeyScopeBoundaryUsageRead",
|
||||
"APIKeyScopeBoundaryUsageUpdate",
|
||||
"APIKeyScopeChatAll",
|
||||
"APIKeyScopeChatCreate",
|
||||
"APIKeyScopeChatDelete",
|
||||
"APIKeyScopeChatRead",
|
||||
"APIKeyScopeChatUpdate",
|
||||
"APIKeyScopeCoderAll",
|
||||
"APIKeyScopeCoderApikeysManageSelf",
|
||||
"APIKeyScopeCoderApplicationConnect",
|
||||
@@ -12099,7 +12327,10 @@
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_auto_pause",
|
||||
"task_manual_pause",
|
||||
"task_resume"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"BuildReasonInitiator",
|
||||
@@ -12110,7 +12341,10 @@
|
||||
"BuildReasonCLI",
|
||||
"BuildReasonSSHConnection",
|
||||
"BuildReasonVSCodeConnection",
|
||||
"BuildReasonJetbrainsConnection"
|
||||
"BuildReasonJetbrainsConnection",
|
||||
"BuildReasonTaskAutoPause",
|
||||
"BuildReasonTaskManualPause",
|
||||
"BuildReasonTaskResume"
|
||||
]
|
||||
},
|
||||
"codersdk.CORSBehavior": {
|
||||
@@ -12134,6 +12368,15 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ChatConfigSettings": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"system_prompt": {
|
||||
"description": "SystemPrompt is the deployment-wide system prompt prepended to all\nnew chat conversations. When empty, the built-in default is used.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ConnectionLatency": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -13233,6 +13476,9 @@
|
||||
"external_auth": {
|
||||
"$ref": "#/definitions/serpent.Struct-array_codersdk_ExternalAuthConfig"
|
||||
},
|
||||
"external_auth_github_default_provider_enable": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"external_token_encryption_keys": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -13510,17 +13756,17 @@
|
||||
"workspace-usage",
|
||||
"web-push",
|
||||
"oauth2",
|
||||
"mcp-server-http",
|
||||
"workspace-sharing"
|
||||
"agents",
|
||||
"mcp-server-http"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAgents": "Enables agent-powered chat functionality.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
|
||||
"ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.",
|
||||
"ExperimentOAuth2": "Enables OAuth2 provider functionality.",
|
||||
"ExperimentWebPush": "Enables web push notifications through the browser.",
|
||||
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-descriptions": [
|
||||
@@ -13530,8 +13776,8 @@
|
||||
"Enables the new workspace usage tracking.",
|
||||
"Enables web push notifications through the browser.",
|
||||
"Enables OAuth2 provider functionality.",
|
||||
"Enables the MCP HTTP server functionality.",
|
||||
"Enables updating workspace ACLs for sharing with users and groups."
|
||||
"Enables agent-powered chat functionality.",
|
||||
"Enables the MCP HTTP server functionality."
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"ExperimentExample",
|
||||
@@ -13540,8 +13786,8 @@
|
||||
"ExperimentWorkspaceUsage",
|
||||
"ExperimentWebPush",
|
||||
"ExperimentOAuth2",
|
||||
"ExperimentMCPServerHTTP",
|
||||
"ExperimentWorkspaceSharing"
|
||||
"ExperimentAgents",
|
||||
"ExperimentMCPServerHTTP"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAPIKeyScopes": {
|
||||
@@ -13781,10 +14027,6 @@
|
||||
"limit": {
|
||||
"type": "integer"
|
||||
},
|
||||
"soft_limit": {
|
||||
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
|
||||
"type": "integer"
|
||||
},
|
||||
"usage_period": {
|
||||
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
|
||||
"allOf": [
|
||||
@@ -13982,6 +14224,9 @@
|
||||
"codersdk.HTTPCookieConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"host_prefix": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"same_site": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -15135,6 +15380,14 @@
|
||||
"organization_mapping": {
|
||||
"type": "object"
|
||||
},
|
||||
"redirect_url": {
|
||||
"description": "RedirectURL is optional, defaulting to 'ACCESS_URL'. Only useful in niche\nsituations where the OIDC callback domain is different from the ACCESS_URL\ndomain.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/serpent.URL"
|
||||
}
|
||||
]
|
||||
},
|
||||
"scopes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -16359,6 +16612,7 @@
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"boundary_usage",
|
||||
"chat",
|
||||
"connection_log",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
@@ -16404,6 +16658,7 @@
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceBoundaryUsage",
|
||||
"ResourceChat",
|
||||
"ResourceConnectionLog",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
@@ -16624,6 +16879,7 @@
|
||||
"health_settings",
|
||||
"notifications_settings",
|
||||
"prebuilds_settings",
|
||||
"chat_config_settings",
|
||||
"workspace_proxy",
|
||||
"organization",
|
||||
"oauth2_provider_app",
|
||||
@@ -16652,6 +16908,7 @@
|
||||
"ResourceTypeHealthSettings",
|
||||
"ResourceTypeNotificationsSettings",
|
||||
"ResourceTypePrebuildsSettings",
|
||||
"ResourceTypeChatConfigSettings",
|
||||
"ResourceTypeWorkspaceProxy",
|
||||
"ResourceTypeOrganization",
|
||||
"ResourceTypeOAuth2ProviderApp",
|
||||
@@ -20924,7 +21181,7 @@
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"description": "Default is parsed into Value if set.",
|
||||
"description": "Default is parsed into Value if set.\nMust be `\"\"` if `DefaultFn` != nil",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
|
||||
+77
-8
@@ -307,20 +307,26 @@ func (api *API) apiKeyByName(rw http.ResponseWriter, r *http.Request) {
|
||||
// @Tags Users
|
||||
// @Param user path string true "User ID, name, or me"
|
||||
// @Success 200 {array} codersdk.APIKey
|
||||
// @Param include_expired query bool false "Include expired tokens in the list"
|
||||
// @Router /users/{user}/keys/tokens [get]
|
||||
func (api *API) tokens(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
user = httpmw.UserParam(r)
|
||||
keys []database.APIKey
|
||||
err error
|
||||
queryStr = r.URL.Query().Get("include_all")
|
||||
includeAll, _ = strconv.ParseBool(queryStr)
|
||||
ctx = r.Context()
|
||||
user = httpmw.UserParam(r)
|
||||
keys []database.APIKey
|
||||
err error
|
||||
queryStr = r.URL.Query().Get("include_all")
|
||||
includeAll, _ = strconv.ParseBool(queryStr)
|
||||
expiredStr = r.URL.Query().Get("include_expired")
|
||||
includeExpired, _ = strconv.ParseBool(expiredStr)
|
||||
)
|
||||
|
||||
if includeAll {
|
||||
// get tokens for all users
|
||||
keys, err = api.Database.GetAPIKeysByLoginType(ctx, database.LoginTypeToken)
|
||||
keys, err = api.Database.GetAPIKeysByLoginType(ctx, database.GetAPIKeysByLoginTypeParams{
|
||||
LoginType: database.LoginTypeToken,
|
||||
IncludeExpired: includeExpired,
|
||||
})
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching API keys.",
|
||||
@@ -330,7 +336,7 @@ func (api *API) tokens(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
} else {
|
||||
// get user's tokens only
|
||||
keys, err = api.Database.GetAPIKeysByUserID(ctx, database.GetAPIKeysByUserIDParams{LoginType: database.LoginTypeToken, UserID: user.ID})
|
||||
keys, err = api.Database.GetAPIKeysByUserID(ctx, database.GetAPIKeysByUserIDParams{LoginType: database.LoginTypeToken, UserID: user.ID, IncludeExpired: includeExpired})
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching API keys.",
|
||||
@@ -421,6 +427,69 @@ func (api *API) deleteAPIKey(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// @Summary Expire API key
|
||||
// @ID expire-api-key
|
||||
// @Security CoderSessionToken
|
||||
// @Tags Users
|
||||
// @Param user path string true "User ID, name, or me"
|
||||
// @Param keyid path string true "Key ID" format(string)
|
||||
// @Success 204
|
||||
// @Failure 404 {object} codersdk.Response
|
||||
// @Failure 500 {object} codersdk.Response
|
||||
// @Router /users/{user}/keys/{keyid}/expire [put]
|
||||
func (api *API) expireAPIKey(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
keyID = chi.URLParam(r, "keyid")
|
||||
auditor = api.Auditor.Load()
|
||||
aReq, commitAudit = audit.InitRequest[database.APIKey](rw, &audit.RequestParams{
|
||||
Audit: *auditor,
|
||||
Log: api.Logger,
|
||||
Request: r,
|
||||
Action: database.AuditActionWrite,
|
||||
})
|
||||
)
|
||||
defer commitAudit()
|
||||
|
||||
if err := api.Database.InTx(func(db database.Store) error {
|
||||
key, err := db.GetAPIKeyByID(ctx, keyID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch API key: %w", err)
|
||||
}
|
||||
if !key.ExpiresAt.After(api.Clock.Now()) {
|
||||
return nil // Already expired
|
||||
}
|
||||
aReq.Old = key
|
||||
if err := db.UpdateAPIKeyByID(ctx, database.UpdateAPIKeyByIDParams{
|
||||
ID: key.ID,
|
||||
LastUsed: key.LastUsed,
|
||||
ExpiresAt: dbtime.Now(),
|
||||
IPAddress: key.IPAddress,
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("expire API key: %w", err)
|
||||
}
|
||||
// Fetch the updated key for audit log.
|
||||
newKey, err := db.GetAPIKeyByID(ctx, keyID)
|
||||
if err != nil {
|
||||
api.Logger.Warn(ctx, "failed to fetch updated API key for audit log", slog.Error(err))
|
||||
} else {
|
||||
aReq.New = newKey
|
||||
}
|
||||
return nil
|
||||
}, nil); httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
} else if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error expiring API key.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// @Summary Get token config
|
||||
// @ID get-token-config
|
||||
// @Security CoderSessionToken
|
||||
|
||||
+196
-3
@@ -69,6 +69,44 @@ func TestTokenCRUD(t *testing.T) {
|
||||
require.Equal(t, database.AuditActionDelete, auditor.AuditLogs()[numLogs-1].Action)
|
||||
}
|
||||
|
||||
func TestTokensFilterExpired(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
adminClient := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, adminClient)
|
||||
|
||||
// Create a token.
|
||||
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// List tokens without including expired - should see the token.
|
||||
keys, err := adminClient.Tokens(ctx, codersdk.Me, codersdk.TokensFilter{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, keys, 1)
|
||||
|
||||
// Expire the token.
|
||||
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// List tokens without including expired - should NOT see expired token.
|
||||
keys, err = adminClient.Tokens(ctx, codersdk.Me, codersdk.TokensFilter{})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, keys)
|
||||
|
||||
// List tokens WITH including expired - should see expired token.
|
||||
keys, err = adminClient.Tokens(ctx, codersdk.Me, codersdk.TokensFilter{
|
||||
IncludeExpired: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, keys, 1)
|
||||
require.Equal(t, keyID, keys[0].ID)
|
||||
}
|
||||
|
||||
func TestTokenScoped(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -439,7 +477,7 @@ func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
|
||||
DeploymentValues: dc,
|
||||
})
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Given: an existing api token for the prebuilds user
|
||||
_, prebuildsToken := dbgen.APIKey(t, db, database.APIKey{
|
||||
@@ -448,12 +486,167 @@ func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
|
||||
client.SetSessionToken(prebuildsToken)
|
||||
|
||||
// When: the prebuilds user tries to create an API key
|
||||
_, err := client.CreateAPIKey(ctx, database.PrebuildsSystemUserID.String())
|
||||
_, err := client.CreateAPIKey(setupCtx, database.PrebuildsSystemUserID.String())
|
||||
// Then: denied.
|
||||
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
|
||||
|
||||
// When: the prebuilds user tries to create a token
|
||||
_, err = client.CreateToken(ctx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
|
||||
_, err = client.CreateToken(setupCtx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
|
||||
// Then: also denied.
|
||||
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
|
||||
}
|
||||
|
||||
//nolint:tparallel,paralleltest // Subtests share the same coderdtest instance and auditor.
|
||||
func TestExpireAPIKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
auditor := audit.NewMock()
|
||||
adminClient := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
|
||||
admin := coderdtest.CreateFirstUser(t, adminClient)
|
||||
memberClient, member := coderdtest.CreateAnotherUser(t, adminClient, admin.OrganizationID)
|
||||
|
||||
t.Run("OwnerCanExpireOwnToken", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a token.
|
||||
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// Verify the token is not expired.
|
||||
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, key.ExpiresAt.After(time.Now()))
|
||||
|
||||
auditor.ResetLogs()
|
||||
|
||||
// Expire the token.
|
||||
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token is expired.
|
||||
key, err = adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, key.ExpiresAt.Before(time.Now()))
|
||||
|
||||
// Verify audit log.
|
||||
als := auditor.AuditLogs()
|
||||
require.Len(t, als, 1)
|
||||
require.Equal(t, database.AuditActionWrite, als[0].Action)
|
||||
require.Equal(t, database.ResourceTypeApiKey, als[0].ResourceType)
|
||||
require.Equal(t, admin.UserID.String(), als[0].UserID.String())
|
||||
})
|
||||
|
||||
t.Run("AdminCanExpireOtherUsersToken", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a token for the member.
|
||||
res, err := memberClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// Admin expires the member's token.
|
||||
err = adminClient.ExpireAPIKey(ctx, member.ID.String(), keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the token is expired.
|
||||
key, err := memberClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, key.ExpiresAt.Before(time.Now()))
|
||||
})
|
||||
|
||||
t.Run("MemberCannotExpireOtherUsersToken", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a token for the admin.
|
||||
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// Member attempts to expire admin's token.
|
||||
err = memberClient.ExpireAPIKey(ctx, admin.UserID.String(), keyID)
|
||||
require.Error(t, err)
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
// Members cannot read other users, so they get a 404 Not Found
|
||||
// from the authorization layer.
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Try to expire a non-existent token.
|
||||
err := adminClient.ExpireAPIKey(ctx, codersdk.Me, "nonexistent")
|
||||
require.Error(t, err)
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("ExpiringAlreadyExpiredTokenSucceeds", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create and expire a token.
|
||||
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// Expire it once.
|
||||
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Invariant: make sure it's actually expired
|
||||
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.LessOrEqual(t, key.ExpiresAt, time.Now(), "key should be expired")
|
||||
|
||||
// Expire it again - should succeed (idempotent).
|
||||
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Token should still be just as expired as before. No more, no less.
|
||||
keyAgain, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, key.ExpiresAt, keyAgain.ExpiresAt, "expiration should be idempotent")
|
||||
})
|
||||
|
||||
t.Run("DeletingExpiredTokenSucceeds", func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Create a token.
|
||||
res, err := adminClient.CreateToken(ctx, codersdk.Me, codersdk.CreateTokenRequest{
|
||||
Lifetime: time.Hour * 24 * 7,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keyID := strings.Split(res.Key, "-")[0]
|
||||
|
||||
// Expire it first.
|
||||
err = adminClient.ExpireAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify it's expired.
|
||||
key, err := adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, key.ExpiresAt.Before(time.Now()))
|
||||
|
||||
// Delete the expired token - should succeed.
|
||||
err = adminClient.DeleteAPIKey(ctx, codersdk.Me, keyID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify it's gone.
|
||||
_, err = adminClient.APIKeyByID(ctx, codersdk.Me, keyID)
|
||||
require.Error(t, err)
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -32,7 +32,8 @@ type Auditable interface {
|
||||
idpsync.OrganizationSyncSettings |
|
||||
idpsync.GroupSyncSettings |
|
||||
idpsync.RoleSyncSettings |
|
||||
database.TaskTable
|
||||
database.TaskTable |
|
||||
database.ChatConfigSettings
|
||||
}
|
||||
|
||||
// Map is a map of changed fields in an audited resource. It maps field names to
|
||||
|
||||
@@ -112,6 +112,8 @@ func ResourceTarget[T Auditable](tgt T) string {
|
||||
return "" // no target?
|
||||
case database.PrebuildsSettings:
|
||||
return "" // no target?
|
||||
case database.ChatConfigSettings:
|
||||
return "" // no target?
|
||||
case database.OAuth2ProviderApp:
|
||||
return typed.Name
|
||||
case database.OAuth2ProviderAppSecret:
|
||||
@@ -176,6 +178,9 @@ func ResourceID[T Auditable](tgt T) uuid.UUID {
|
||||
case database.PrebuildsSettings:
|
||||
// Artificial ID for auditing purposes
|
||||
return typed.ID
|
||||
case database.ChatConfigSettings:
|
||||
// Artificial ID for auditing purposes
|
||||
return typed.ID
|
||||
case database.OAuth2ProviderApp:
|
||||
return typed.ID
|
||||
case database.OAuth2ProviderAppSecret:
|
||||
@@ -231,6 +236,8 @@ func ResourceType[T Auditable](tgt T) database.ResourceType {
|
||||
return database.ResourceTypeNotificationsSettings
|
||||
case database.PrebuildsSettings:
|
||||
return database.ResourceTypePrebuildsSettings
|
||||
case database.ChatConfigSettings:
|
||||
return database.ResourceTypeChatConfigSettings
|
||||
case database.OAuth2ProviderApp:
|
||||
return database.ResourceTypeOauth2ProviderApp
|
||||
case database.OAuth2ProviderAppSecret:
|
||||
@@ -289,6 +296,8 @@ func ResourceRequiresOrgID[T Auditable]() bool {
|
||||
case database.PrebuildsSettings:
|
||||
// Artificial ID for auditing purposes
|
||||
return false
|
||||
case database.ChatConfigSettings:
|
||||
return false
|
||||
case database.OAuth2ProviderApp:
|
||||
return false
|
||||
case database.OAuth2ProviderAppSecret:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package coderd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
@@ -8,6 +9,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
@@ -91,6 +93,36 @@ func (h *HTTPAuthorizer) Authorize(r *http.Request, action policy.Action, object
|
||||
return true
|
||||
}
|
||||
|
||||
// AuthorizeContext checks whether the RBAC subject on the context
|
||||
// is authorized to perform the given action. The subject must have
|
||||
// been set via dbauthz.As or the ExtractAPIKey middleware. Returns
|
||||
// false if the subject is missing or unauthorized.
|
||||
func (h *HTTPAuthorizer) AuthorizeContext(ctx context.Context, action policy.Action, object rbac.Objecter) bool {
|
||||
roles, ok := dbauthz.ActorFromContext(ctx)
|
||||
if !ok {
|
||||
h.Logger.Error(ctx, "no authorization actor in context")
|
||||
return false
|
||||
}
|
||||
err := h.Authorizer.Authorize(ctx, roles, action, object.RBACObject())
|
||||
if err != nil {
|
||||
internalError := new(rbac.UnauthorizedError)
|
||||
logger := h.Logger
|
||||
if xerrors.As(err, internalError) {
|
||||
logger = h.Logger.With(slog.F("internal_error", internalError.Internal()))
|
||||
}
|
||||
logger.Warn(ctx, "requester is not authorized to access the object",
|
||||
slog.F("roles", roles.SafeRoleNames()),
|
||||
slog.F("actor_id", roles.ID),
|
||||
slog.F("actor_name", roles),
|
||||
slog.F("scope", roles.SafeScopeName()),
|
||||
slog.F("action", action),
|
||||
slog.F("object", object),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AuthorizeSQLFilter returns an authorization filter that can used in a
|
||||
// SQL 'WHERE' clause. If the filter is used, the resulting rows returned
|
||||
// from postgres are already authorized, and the caller does not need to
|
||||
@@ -106,6 +138,22 @@ func (h *HTTPAuthorizer) AuthorizeSQLFilter(r *http.Request, action policy.Actio
|
||||
return prepared, nil
|
||||
}
|
||||
|
||||
// AuthorizeSQLFilterContext is like AuthorizeSQLFilter but reads the
|
||||
// RBAC subject from the context directly rather than from an
|
||||
// *http.Request. The subject must have been set via dbauthz.As.
|
||||
func (h *HTTPAuthorizer) AuthorizeSQLFilterContext(ctx context.Context, action policy.Action, objectType string) (rbac.PreparedAuthorized, error) {
|
||||
roles, ok := dbauthz.ActorFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, xerrors.New("no authorization actor in context")
|
||||
}
|
||||
prepared, err := h.Authorizer.Prepare(ctx, roles, action, objectType)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("prepare filter: %w", err)
|
||||
}
|
||||
|
||||
return prepared, nil
|
||||
}
|
||||
|
||||
// checkAuthorization returns if the current API key can use the given
|
||||
// permissions, factoring in the current user's roles and the API key scopes.
|
||||
//
|
||||
|
||||
@@ -231,6 +231,7 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
job *database.ProvisionerJob
|
||||
auditLog *auditParams
|
||||
shouldNotifyDormancy bool
|
||||
shouldNotifyTaskPause bool
|
||||
nextBuild *database.WorkspaceBuild
|
||||
activeTemplateVersion database.TemplateVersion
|
||||
ws database.Workspace
|
||||
@@ -316,6 +317,10 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
return nil
|
||||
}
|
||||
|
||||
if reason == database.BuildReasonTaskAutoPause {
|
||||
shouldNotifyTaskPause = true
|
||||
}
|
||||
|
||||
// Get the template version job to access tags
|
||||
templateVersionJob, err := tx.GetProvisionerJobByID(e.ctx, activeTemplateVersion.JobID)
|
||||
if err != nil {
|
||||
@@ -482,6 +487,28 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
log.Warn(e.ctx, "failed to notify of workspace marked as dormant", slog.Error(err), slog.F("workspace_id", ws.ID))
|
||||
}
|
||||
}
|
||||
if shouldNotifyTaskPause {
|
||||
task, err := e.db.GetTaskByID(e.ctx, ws.TaskID.UUID)
|
||||
if err != nil {
|
||||
log.Warn(e.ctx, "failed to get task for pause notification", slog.Error(err), slog.F("task_id", ws.TaskID.UUID), slog.F("workspace_id", ws.ID))
|
||||
} else {
|
||||
if _, err := e.notificationsEnqueuer.Enqueue(
|
||||
e.ctx,
|
||||
ws.OwnerID,
|
||||
notifications.TemplateTaskPaused,
|
||||
map[string]string{
|
||||
"task": task.Name,
|
||||
"task_id": task.ID.String(),
|
||||
"workspace": ws.Name,
|
||||
"pause_reason": "idle timeout",
|
||||
},
|
||||
"lifecycle_executor",
|
||||
ws.ID, ws.OwnerID, ws.OrganizationID,
|
||||
); err != nil {
|
||||
log.Warn(e.ctx, "failed to notify of task paused", slog.Error(err), slog.F("task_id", ws.TaskID.UUID), slog.F("workspace_id", ws.ID))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err != nil && !xerrors.Is(err, context.Canceled) {
|
||||
@@ -525,10 +552,18 @@ func getNextTransition(
|
||||
) {
|
||||
switch {
|
||||
case isEligibleForAutostop(user, ws, latestBuild, latestJob, currentTick):
|
||||
// Use task-specific reason for AI task workspaces.
|
||||
if ws.TaskID.Valid {
|
||||
return database.WorkspaceTransitionStop, database.BuildReasonTaskAutoPause, nil
|
||||
}
|
||||
return database.WorkspaceTransitionStop, database.BuildReasonAutostop, nil
|
||||
case isEligibleForAutostart(user, ws, latestBuild, latestJob, templateSchedule, currentTick):
|
||||
return database.WorkspaceTransitionStart, database.BuildReasonAutostart, nil
|
||||
case isEligibleForFailedStop(latestBuild, latestJob, templateSchedule, currentTick):
|
||||
// Use task-specific reason for AI task workspaces.
|
||||
if ws.TaskID.Valid {
|
||||
return database.WorkspaceTransitionStop, database.BuildReasonTaskAutoPause, nil
|
||||
}
|
||||
return database.WorkspaceTransitionStop, database.BuildReasonAutostop, nil
|
||||
case isEligibleForDormantStop(ws, templateSchedule, currentTick):
|
||||
// Only stop started workspaces.
|
||||
|
||||
@@ -5,12 +5,113 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/schedule"
|
||||
)
|
||||
|
||||
func Test_getNextTransition_TaskAutoPause(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Set up a workspace that is eligible for autostop (past deadline).
|
||||
now := time.Now()
|
||||
pastDeadline := now.Add(-time.Hour)
|
||||
|
||||
okUser := database.User{Status: database.UserStatusActive}
|
||||
okBuild := database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
Deadline: pastDeadline,
|
||||
}
|
||||
okJob := database.ProvisionerJob{
|
||||
JobStatus: database.ProvisionerJobStatusSucceeded,
|
||||
}
|
||||
okTemplateSchedule := schedule.TemplateScheduleOptions{}
|
||||
|
||||
// Failed build setup for failedstop tests.
|
||||
failedBuild := database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
}
|
||||
failedJob := database.ProvisionerJob{
|
||||
JobStatus: database.ProvisionerJobStatusFailed,
|
||||
CompletedAt: sql.NullTime{Time: now.Add(-time.Hour), Valid: true},
|
||||
}
|
||||
failedTemplateSchedule := schedule.TemplateScheduleOptions{
|
||||
FailureTTL: time.Minute, // TTL already elapsed since job completed an hour ago.
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
Name string
|
||||
Workspace database.Workspace
|
||||
Build database.WorkspaceBuild
|
||||
Job database.ProvisionerJob
|
||||
TemplateSchedule schedule.TemplateScheduleOptions
|
||||
ExpectedReason database.BuildReason
|
||||
}{
|
||||
{
|
||||
Name: "RegularWorkspace_Autostop",
|
||||
Workspace: database.Workspace{
|
||||
DormantAt: sql.NullTime{Valid: false},
|
||||
},
|
||||
Build: okBuild,
|
||||
Job: okJob,
|
||||
TemplateSchedule: okTemplateSchedule,
|
||||
ExpectedReason: database.BuildReasonAutostop,
|
||||
},
|
||||
{
|
||||
Name: "TaskWorkspace_Autostop_UsesTaskAutoPause",
|
||||
Workspace: database.Workspace{
|
||||
DormantAt: sql.NullTime{Valid: false},
|
||||
TaskID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
},
|
||||
Build: okBuild,
|
||||
Job: okJob,
|
||||
TemplateSchedule: okTemplateSchedule,
|
||||
ExpectedReason: database.BuildReasonTaskAutoPause,
|
||||
},
|
||||
{
|
||||
Name: "RegularWorkspace_FailedStop",
|
||||
Workspace: database.Workspace{
|
||||
DormantAt: sql.NullTime{Valid: false},
|
||||
},
|
||||
Build: failedBuild,
|
||||
Job: failedJob,
|
||||
TemplateSchedule: failedTemplateSchedule,
|
||||
ExpectedReason: database.BuildReasonAutostop,
|
||||
},
|
||||
{
|
||||
Name: "TaskWorkspace_FailedStop_UsesTaskAutoPause",
|
||||
Workspace: database.Workspace{
|
||||
DormantAt: sql.NullTime{Valid: false},
|
||||
TaskID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
},
|
||||
Build: failedBuild,
|
||||
Job: failedJob,
|
||||
TemplateSchedule: failedTemplateSchedule,
|
||||
ExpectedReason: database.BuildReasonTaskAutoPause,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
transition, reason, err := getNextTransition(
|
||||
okUser,
|
||||
tc.Workspace,
|
||||
tc.Build,
|
||||
tc.Job,
|
||||
tc.TemplateSchedule,
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, database.WorkspaceTransitionStop, transition)
|
||||
require.Equal(t, tc.ExpectedReason, reason)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_isEligibleForAutostart(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -2019,5 +2019,69 @@ func TestExecutorTaskWorkspace(t *testing.T) {
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
|
||||
// Then: The build reason should be TaskAutoPause (not regular Autostop)
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
assert.Equal(t, codersdk.BuildReasonTaskAutoPause, workspace.LatestBuild.Reason, "task workspace should use TaskAutoPause build reason")
|
||||
})
|
||||
|
||||
t.Run("AutostopNotification", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
notifyEnq = notificationstest.FakeEnqueuer{}
|
||||
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
NotificationsEnqueuer: ¬ifyEnq,
|
||||
})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
)
|
||||
|
||||
// Given: A task workspace with an 8 hour deadline
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 8*time.Hour)
|
||||
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostop notification")
|
||||
|
||||
// Given: The workspace is currently running
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
|
||||
require.NotZero(t, workspace.LatestBuild.Deadline, "workspace should have a deadline for autostop")
|
||||
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: the autobuild executor ticks after the deadline
|
||||
go func() {
|
||||
tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: We expect to see a stop transition
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
|
||||
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
|
||||
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
|
||||
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
|
||||
|
||||
// Then: A task paused notification was sent with "idle timeout" reason
|
||||
require.True(t, workspace.TaskID.Valid, "workspace should have a task ID")
|
||||
task, err := db.GetTaskByID(dbauthz.AsSystemRestricted(ctx), workspace.TaskID.UUID)
|
||||
require.NoError(t, err)
|
||||
|
||||
sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTaskPaused))
|
||||
require.Len(t, sent, 1)
|
||||
require.Equal(t, workspace.OwnerID, sent[0].UserID)
|
||||
require.Equal(t, task.Name, sent[0].Labels["task"])
|
||||
require.Equal(t, task.ID.String(), sent[0].Labels["task_id"])
|
||||
require.Equal(t, workspace.Name, sent[0].Labels["workspace"])
|
||||
require.Equal(t, "idle timeout", sent[0].Labels["pause_reason"])
|
||||
})
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,86 @@
|
||||
package chatd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
)
|
||||
|
||||
func TestRefreshChatWorkspaceSnapshot_NoReloadWhenWorkspacePresent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
workspaceID := uuid.New()
|
||||
chat := database.Chat{
|
||||
ID: uuid.New(),
|
||||
WorkspaceID: uuid.NullUUID{
|
||||
UUID: workspaceID,
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
calls := 0
|
||||
refreshed, err := refreshChatWorkspaceSnapshot(
|
||||
context.Background(),
|
||||
chat,
|
||||
func(context.Context, uuid.UUID) (database.Chat, error) {
|
||||
calls++
|
||||
return database.Chat{}, nil
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, chat, refreshed)
|
||||
require.Equal(t, 0, calls)
|
||||
}
|
||||
|
||||
func TestRefreshChatWorkspaceSnapshot_ReloadsWhenWorkspaceMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
chatID := uuid.New()
|
||||
workspaceID := uuid.New()
|
||||
chat := database.Chat{ID: chatID}
|
||||
reloaded := database.Chat{
|
||||
ID: chatID,
|
||||
WorkspaceID: uuid.NullUUID{
|
||||
UUID: workspaceID,
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
calls := 0
|
||||
refreshed, err := refreshChatWorkspaceSnapshot(
|
||||
context.Background(),
|
||||
chat,
|
||||
func(_ context.Context, id uuid.UUID) (database.Chat, error) {
|
||||
calls++
|
||||
require.Equal(t, chatID, id)
|
||||
return reloaded, nil
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, reloaded, refreshed)
|
||||
require.Equal(t, 1, calls)
|
||||
}
|
||||
|
||||
func TestRefreshChatWorkspaceSnapshot_ReturnsReloadError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
chat := database.Chat{ID: uuid.New()}
|
||||
loadErr := xerrors.New("boom")
|
||||
|
||||
refreshed, err := refreshChatWorkspaceSnapshot(
|
||||
context.Background(),
|
||||
chat,
|
||||
func(context.Context, uuid.UUID) (database.Chat, error) {
|
||||
return database.Chat{}, loadErr
|
||||
},
|
||||
)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "reload chat workspace state")
|
||||
require.ErrorContains(t, err, loadErr.Error())
|
||||
require.Equal(t, chat, refreshed)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,708 @@
|
||||
package chatloop
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"charm.land/fantasy"
|
||||
fantasyanthropic "charm.land/fantasy/providers/anthropic"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/chatd/chatprompt"
|
||||
"github.com/coder/coder/v2/coderd/chatd/chatretry"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
const (
|
||||
interruptedToolResultErrorMessage = "tool call was interrupted before it produced a result"
|
||||
)
|
||||
|
||||
var ErrInterrupted = xerrors.New("chat interrupted")
|
||||
|
||||
// PersistedStep contains the full content of a completed or
|
||||
// interrupted agent step. Content includes both assistant blocks
|
||||
// (text, reasoning, tool calls) and tool result blocks, mirroring
|
||||
// what fantasy provides in StepResult.Content. The persistence
|
||||
// layer is responsible for splitting these into separate database
|
||||
// messages by role.
|
||||
type PersistedStep struct {
|
||||
Content []fantasy.Content
|
||||
Usage fantasy.Usage
|
||||
ContextLimit sql.NullInt64
|
||||
}
|
||||
|
||||
// RunOptions configures a single streaming chat loop run.
|
||||
type RunOptions struct {
|
||||
Model fantasy.LanguageModel
|
||||
Messages []fantasy.Message
|
||||
Tools []fantasy.AgentTool
|
||||
StreamCall fantasy.AgentStreamCall
|
||||
MaxSteps int
|
||||
|
||||
ActiveTools []string
|
||||
ContextLimitFallback int64
|
||||
|
||||
PersistStep func(context.Context, PersistedStep) error
|
||||
PublishMessagePart func(
|
||||
role fantasy.MessageRole,
|
||||
part codersdk.ChatMessagePart,
|
||||
)
|
||||
Compaction *CompactionOptions
|
||||
|
||||
// OnRetry is called before each retry attempt when the LLM
|
||||
// stream fails with a retryable error. It provides the attempt
|
||||
// number, error, and backoff delay so callers can publish status
|
||||
// events to connected clients.
|
||||
OnRetry chatretry.OnRetryFn
|
||||
|
||||
OnInterruptedPersistError func(error)
|
||||
}
|
||||
|
||||
// Run executes the chat step-stream loop and delegates persistence/publishing to callbacks.
|
||||
func Run(ctx context.Context, opts RunOptions) (*fantasy.AgentResult, error) {
|
||||
if opts.Model == nil {
|
||||
return nil, xerrors.New("chat model is required")
|
||||
}
|
||||
if opts.PersistStep == nil {
|
||||
return nil, xerrors.New("persist step callback is required")
|
||||
}
|
||||
if opts.MaxSteps <= 0 {
|
||||
opts.MaxSteps = 1
|
||||
}
|
||||
|
||||
publishMessagePart := func(role fantasy.MessageRole, part codersdk.ChatMessagePart) {
|
||||
if opts.PublishMessagePart == nil {
|
||||
return
|
||||
}
|
||||
opts.PublishMessagePart(role, part)
|
||||
}
|
||||
|
||||
var (
|
||||
stepStateMu sync.Mutex
|
||||
streamToolNames map[string]string
|
||||
streamReasoningTitles map[string]string
|
||||
streamReasoningText map[string]string
|
||||
// stepToolResultContents tracks tool results received during
|
||||
// streaming. These are needed for the interrupted-step path
|
||||
// where OnStepFinish never fires.
|
||||
stepToolResultContents []fantasy.ToolResultContent
|
||||
stepAssistantDraft []fantasy.Content
|
||||
stepToolCallIndexByID map[string]int
|
||||
)
|
||||
|
||||
resetStepState := func() {
|
||||
stepStateMu.Lock()
|
||||
streamToolNames = make(map[string]string)
|
||||
streamReasoningTitles = make(map[string]string)
|
||||
streamReasoningText = make(map[string]string)
|
||||
stepToolResultContents = nil
|
||||
stepAssistantDraft = nil
|
||||
stepToolCallIndexByID = make(map[string]int)
|
||||
stepStateMu.Unlock()
|
||||
}
|
||||
|
||||
setReasoningTitleFromText := func(id string, text string) {
|
||||
if id == "" || strings.TrimSpace(text) == "" {
|
||||
return
|
||||
}
|
||||
|
||||
stepStateMu.Lock()
|
||||
defer stepStateMu.Unlock()
|
||||
|
||||
if streamReasoningTitles[id] != "" {
|
||||
return
|
||||
}
|
||||
|
||||
streamReasoningText[id] += text
|
||||
if !strings.ContainsAny(streamReasoningText[id], "\r\n") {
|
||||
return
|
||||
}
|
||||
title := chatprompt.ReasoningTitleFromFirstLine(streamReasoningText[id])
|
||||
if title == "" {
|
||||
return
|
||||
}
|
||||
|
||||
streamReasoningTitles[id] = title
|
||||
}
|
||||
|
||||
appendDraftText := func(text string) {
|
||||
if text == "" {
|
||||
return
|
||||
}
|
||||
|
||||
stepStateMu.Lock()
|
||||
defer stepStateMu.Unlock()
|
||||
|
||||
if len(stepAssistantDraft) > 0 {
|
||||
lastIndex := len(stepAssistantDraft) - 1
|
||||
switch last := stepAssistantDraft[lastIndex].(type) {
|
||||
case fantasy.TextContent:
|
||||
last.Text += text
|
||||
stepAssistantDraft[lastIndex] = last
|
||||
return
|
||||
case *fantasy.TextContent:
|
||||
last.Text += text
|
||||
stepAssistantDraft[lastIndex] = fantasy.TextContent{Text: last.Text}
|
||||
return
|
||||
}
|
||||
}
|
||||
stepAssistantDraft = append(stepAssistantDraft, fantasy.TextContent{Text: text})
|
||||
}
|
||||
|
||||
appendDraftReasoning := func(text string) {
|
||||
if text == "" {
|
||||
return
|
||||
}
|
||||
|
||||
stepStateMu.Lock()
|
||||
defer stepStateMu.Unlock()
|
||||
|
||||
if len(stepAssistantDraft) > 0 {
|
||||
lastIndex := len(stepAssistantDraft) - 1
|
||||
switch last := stepAssistantDraft[lastIndex].(type) {
|
||||
case fantasy.ReasoningContent:
|
||||
last.Text += text
|
||||
stepAssistantDraft[lastIndex] = last
|
||||
return
|
||||
case *fantasy.ReasoningContent:
|
||||
last.Text += text
|
||||
stepAssistantDraft[lastIndex] = fantasy.ReasoningContent{Text: last.Text}
|
||||
return
|
||||
}
|
||||
}
|
||||
stepAssistantDraft = append(stepAssistantDraft, fantasy.ReasoningContent{Text: text})
|
||||
}
|
||||
|
||||
upsertDraftToolCall := func(toolCallID, toolName, input string, appendInput bool) {
|
||||
if toolCallID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
stepStateMu.Lock()
|
||||
defer stepStateMu.Unlock()
|
||||
|
||||
if strings.TrimSpace(toolName) != "" {
|
||||
streamToolNames[toolCallID] = toolName
|
||||
}
|
||||
|
||||
index, exists := stepToolCallIndexByID[toolCallID]
|
||||
if !exists {
|
||||
stepToolCallIndexByID[toolCallID] = len(stepAssistantDraft)
|
||||
stepAssistantDraft = append(stepAssistantDraft, fantasy.ToolCallContent{
|
||||
ToolCallID: toolCallID,
|
||||
ToolName: toolName,
|
||||
Input: input,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if index < 0 || index >= len(stepAssistantDraft) {
|
||||
stepToolCallIndexByID[toolCallID] = len(stepAssistantDraft)
|
||||
stepAssistantDraft = append(stepAssistantDraft, fantasy.ToolCallContent{
|
||||
ToolCallID: toolCallID,
|
||||
ToolName: toolName,
|
||||
Input: input,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
existingCall, ok := fantasy.AsContentType[fantasy.ToolCallContent](stepAssistantDraft[index])
|
||||
if !ok {
|
||||
if ptrCall, ptrOK := fantasy.AsContentType[*fantasy.ToolCallContent](stepAssistantDraft[index]); ptrOK && ptrCall != nil {
|
||||
existingCall = *ptrCall
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
stepToolCallIndexByID[toolCallID] = len(stepAssistantDraft)
|
||||
stepAssistantDraft = append(stepAssistantDraft, fantasy.ToolCallContent{
|
||||
ToolCallID: toolCallID,
|
||||
ToolName: toolName,
|
||||
Input: input,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if strings.TrimSpace(toolName) != "" {
|
||||
existingCall.ToolName = toolName
|
||||
}
|
||||
if appendInput {
|
||||
existingCall.Input += input
|
||||
} else if input != "" || existingCall.Input == "" {
|
||||
existingCall.Input = input
|
||||
}
|
||||
stepAssistantDraft[index] = existingCall
|
||||
}
|
||||
|
||||
appendDraftSource := func(source fantasy.SourceContent) {
|
||||
stepStateMu.Lock()
|
||||
stepAssistantDraft = append(stepAssistantDraft, source)
|
||||
stepStateMu.Unlock()
|
||||
}
|
||||
|
||||
persistInterruptedStep := func() error {
|
||||
stepStateMu.Lock()
|
||||
draft := append([]fantasy.Content(nil), stepAssistantDraft...)
|
||||
toolResults := append([]fantasy.ToolResultContent(nil), stepToolResultContents...)
|
||||
toolNameByCallID := make(map[string]string, len(streamToolNames))
|
||||
for id, name := range streamToolNames {
|
||||
toolNameByCallID[id] = name
|
||||
}
|
||||
stepStateMu.Unlock()
|
||||
|
||||
if len(draft) == 0 && len(toolResults) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Track which tool calls already have results.
|
||||
answeredToolCalls := make(map[string]struct{}, len(toolResults))
|
||||
for _, tr := range toolResults {
|
||||
if tr.ToolCallID != "" {
|
||||
answeredToolCalls[tr.ToolCallID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Build the combined content: draft + received tool results
|
||||
// + synthetic interrupted results for unanswered tool calls.
|
||||
content := make([]fantasy.Content, 0, len(draft)+len(toolResults))
|
||||
content = append(content, draft...)
|
||||
for _, tr := range toolResults {
|
||||
content = append(content, tr)
|
||||
}
|
||||
|
||||
for _, block := range draft {
|
||||
toolCall, ok := fantasy.AsContentType[fantasy.ToolCallContent](block)
|
||||
if !ok {
|
||||
if ptrCall, ptrOK := fantasy.AsContentType[*fantasy.ToolCallContent](block); ptrOK && ptrCall != nil {
|
||||
toolCall = *ptrCall
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
if !ok || toolCall.ToolCallID == "" {
|
||||
continue
|
||||
}
|
||||
if _, exists := answeredToolCalls[toolCall.ToolCallID]; exists {
|
||||
continue
|
||||
}
|
||||
|
||||
toolName := strings.TrimSpace(toolCall.ToolName)
|
||||
if toolName == "" {
|
||||
toolName = strings.TrimSpace(toolNameByCallID[toolCall.ToolCallID])
|
||||
}
|
||||
|
||||
content = append(content, fantasy.ToolResultContent{
|
||||
ToolCallID: toolCall.ToolCallID,
|
||||
ToolName: toolName,
|
||||
Result: fantasy.ToolResultOutputContentError{
|
||||
Error: xerrors.New(interruptedToolResultErrorMessage),
|
||||
},
|
||||
})
|
||||
answeredToolCalls[toolCall.ToolCallID] = struct{}{}
|
||||
}
|
||||
|
||||
persistCtx := context.WithoutCancel(ctx)
|
||||
return opts.PersistStep(persistCtx, PersistedStep{
|
||||
Content: content,
|
||||
})
|
||||
}
|
||||
|
||||
resetStepState()
|
||||
|
||||
agent := fantasy.NewAgent(
|
||||
opts.Model,
|
||||
fantasy.WithTools(opts.Tools...),
|
||||
fantasy.WithStopConditions(fantasy.StepCountIs(opts.MaxSteps)),
|
||||
)
|
||||
applyAnthropicCaching := shouldApplyAnthropicPromptCaching(opts.Model)
|
||||
// Fantasy's AgentStreamCall currently requires a non-empty Prompt and always
|
||||
// appends it as a user message. chatd already supplies the full history in
|
||||
// Messages, so we pass and then strip a sentinel user message in PrepareStep.
|
||||
sentinelPrompt := "__chatd_agent_prompt_sentinel_" + uuid.NewString()
|
||||
|
||||
streamCall := opts.StreamCall
|
||||
streamCall.Prompt = sentinelPrompt
|
||||
streamCall.Messages = opts.Messages
|
||||
streamCall.PrepareStep = func(
|
||||
stepCtx context.Context,
|
||||
options fantasy.PrepareStepFunctionOptions,
|
||||
) (context.Context, fantasy.PrepareStepResult, error) {
|
||||
return stepCtx, prepareStepResult(
|
||||
options.Messages,
|
||||
sentinelPrompt,
|
||||
opts.ActiveTools,
|
||||
applyAnthropicCaching,
|
||||
), nil
|
||||
}
|
||||
streamCall.OnStepStart = func(_ int) error {
|
||||
resetStepState()
|
||||
return nil
|
||||
}
|
||||
streamCall.OnTextDelta = func(_ string, text string) error {
|
||||
appendDraftText(text)
|
||||
publishMessagePart(fantasy.MessageRoleAssistant, codersdk.ChatMessagePart{
|
||||
Type: codersdk.ChatMessagePartTypeText,
|
||||
Text: text,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
streamCall.OnReasoningDelta = func(id string, text string) error {
|
||||
appendDraftReasoning(text)
|
||||
setReasoningTitleFromText(id, text)
|
||||
stepStateMu.Lock()
|
||||
title := streamReasoningTitles[id]
|
||||
stepStateMu.Unlock()
|
||||
publishMessagePart(fantasy.MessageRoleAssistant, codersdk.ChatMessagePart{
|
||||
Type: codersdk.ChatMessagePartTypeReasoning,
|
||||
Text: text,
|
||||
Title: title,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
streamCall.OnReasoningEnd = func(id string, _ fantasy.ReasoningContent) error {
|
||||
stepStateMu.Lock()
|
||||
if streamReasoningTitles[id] == "" {
|
||||
// At the end of reasoning we have the full text, so we can
|
||||
// safely evaluate first-line title format even if no newline
|
||||
// ever arrived in deltas.
|
||||
streamReasoningTitles[id] = chatprompt.ReasoningTitleFromFirstLine(
|
||||
streamReasoningText[id],
|
||||
)
|
||||
}
|
||||
title := streamReasoningTitles[id]
|
||||
stepStateMu.Unlock()
|
||||
if title != "" {
|
||||
// Publish a title-only reasoning part so clients can update the
|
||||
// reasoning header when metadata arrives at the end of streaming.
|
||||
publishMessagePart(fantasy.MessageRoleAssistant, codersdk.ChatMessagePart{
|
||||
Type: codersdk.ChatMessagePartTypeReasoning,
|
||||
Title: title,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
streamCall.OnToolInputStart = func(id, toolName string) error {
|
||||
upsertDraftToolCall(id, toolName, "", false)
|
||||
return nil
|
||||
}
|
||||
streamCall.OnToolInputDelta = func(id, delta string) error {
|
||||
stepStateMu.Lock()
|
||||
toolName := streamToolNames[id]
|
||||
stepStateMu.Unlock()
|
||||
upsertDraftToolCall(id, toolName, delta, true)
|
||||
publishMessagePart(fantasy.MessageRoleAssistant, codersdk.ChatMessagePart{
|
||||
Type: codersdk.ChatMessagePartTypeToolCall,
|
||||
ToolCallID: id,
|
||||
ToolName: toolName,
|
||||
ArgsDelta: delta,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
streamCall.OnToolCall = func(toolCall fantasy.ToolCallContent) error {
|
||||
upsertDraftToolCall(toolCall.ToolCallID, toolCall.ToolName, toolCall.Input, false)
|
||||
publishMessagePart(
|
||||
fantasy.MessageRoleAssistant,
|
||||
chatprompt.PartFromContent(toolCall),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
streamCall.OnSource = func(source fantasy.SourceContent) error {
|
||||
appendDraftSource(source)
|
||||
publishMessagePart(
|
||||
fantasy.MessageRoleAssistant,
|
||||
chatprompt.PartFromContent(source),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
streamCall.OnToolResult = func(result fantasy.ToolResultContent) error {
|
||||
publishMessagePart(
|
||||
fantasy.MessageRoleTool,
|
||||
chatprompt.PartFromContent(result),
|
||||
)
|
||||
|
||||
stepStateMu.Lock()
|
||||
if result.ToolCallID != "" && strings.TrimSpace(result.ToolName) != "" {
|
||||
streamToolNames[result.ToolCallID] = result.ToolName
|
||||
}
|
||||
stepToolResultContents = append(stepToolResultContents, result)
|
||||
stepStateMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
streamCall.OnStepFinish = func(stepResult fantasy.StepResult) error {
|
||||
contextLimit := extractContextLimit(stepResult.ProviderMetadata)
|
||||
if !contextLimit.Valid && opts.ContextLimitFallback > 0 {
|
||||
contextLimit = sql.NullInt64{
|
||||
Int64: opts.ContextLimitFallback,
|
||||
Valid: true,
|
||||
}
|
||||
}
|
||||
|
||||
return opts.PersistStep(ctx, PersistedStep{
|
||||
Content: stepResult.Content,
|
||||
Usage: stepResult.Usage,
|
||||
ContextLimit: contextLimit,
|
||||
})
|
||||
}
|
||||
|
||||
var result *fantasy.AgentResult
|
||||
err := chatretry.Retry(ctx, func(retryCtx context.Context) error {
|
||||
var streamErr error
|
||||
result, streamErr = agent.Stream(retryCtx, streamCall)
|
||||
if streamErr != nil {
|
||||
// Interrupts are not retryable — propagate them
|
||||
// immediately so processChat can set the correct
|
||||
// status.
|
||||
if errors.Is(streamErr, context.Canceled) &&
|
||||
errors.Is(context.Cause(retryCtx), ErrInterrupted) {
|
||||
if persistErr := persistInterruptedStep(); persistErr != nil {
|
||||
if opts.OnInterruptedPersistError != nil {
|
||||
opts.OnInterruptedPersistError(persistErr)
|
||||
}
|
||||
}
|
||||
// Return ErrInterrupted directly so the retry
|
||||
// loop sees a non-retryable error and stops.
|
||||
return ErrInterrupted
|
||||
}
|
||||
return streamErr
|
||||
}
|
||||
return nil
|
||||
}, func(attempt int, retryErr error, delay time.Duration) {
|
||||
// Reset accumulated draft state from the failed attempt
|
||||
// so the next attempt starts clean.
|
||||
resetStepState()
|
||||
|
||||
if opts.OnRetry != nil {
|
||||
opts.OnRetry(attempt, retryErr, delay)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrInterrupted) {
|
||||
return nil, ErrInterrupted
|
||||
}
|
||||
return nil, xerrors.Errorf("stream response: %w", err)
|
||||
}
|
||||
if opts.Compaction != nil {
|
||||
if err := maybeCompact(ctx, opts, result); err != nil {
|
||||
if opts.Compaction.OnError != nil {
|
||||
opts.Compaction.OnError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
//nolint:revive // Boolean controls Anthropic-specific caching behavior.
|
||||
func prepareStepResult(
|
||||
messages []fantasy.Message,
|
||||
sentinel string,
|
||||
activeTools []string,
|
||||
anthropicCaching bool,
|
||||
) fantasy.PrepareStepResult {
|
||||
filtered := make([]fantasy.Message, 0, len(messages))
|
||||
removed := false
|
||||
for _, message := range messages {
|
||||
if !removed &&
|
||||
message.Role == fantasy.MessageRoleUser &&
|
||||
len(message.Content) == 1 {
|
||||
textPart, ok := fantasy.AsMessagePart[fantasy.TextPart](message.Content[0])
|
||||
if ok && textPart.Text == sentinel {
|
||||
removed = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
filtered = append(filtered, message)
|
||||
}
|
||||
|
||||
result := fantasy.PrepareStepResult{
|
||||
Messages: filtered,
|
||||
}
|
||||
if anthropicCaching {
|
||||
result.Messages = addAnthropicPromptCaching(result.Messages)
|
||||
}
|
||||
if len(activeTools) > 0 {
|
||||
result.ActiveTools = append([]string(nil), activeTools...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func shouldApplyAnthropicPromptCaching(model fantasy.LanguageModel) bool {
|
||||
if model == nil {
|
||||
return false
|
||||
}
|
||||
return model.Provider() == fantasyanthropic.Name
|
||||
}
|
||||
|
||||
func addAnthropicPromptCaching(messages []fantasy.Message) []fantasy.Message {
|
||||
for i := range messages {
|
||||
messages[i].ProviderOptions = nil
|
||||
}
|
||||
|
||||
providerOption := fantasy.ProviderOptions{
|
||||
fantasyanthropic.Name: &fantasyanthropic.ProviderCacheControlOptions{
|
||||
CacheControl: fantasyanthropic.CacheControl{Type: "ephemeral"},
|
||||
},
|
||||
}
|
||||
|
||||
lastSystemRoleIdx := -1
|
||||
systemMessageUpdated := false
|
||||
for i, msg := range messages {
|
||||
if msg.Role == fantasy.MessageRoleSystem {
|
||||
lastSystemRoleIdx = i
|
||||
} else if !systemMessageUpdated && lastSystemRoleIdx >= 0 {
|
||||
messages[lastSystemRoleIdx].ProviderOptions = providerOption
|
||||
systemMessageUpdated = true
|
||||
}
|
||||
if i > len(messages)-3 {
|
||||
messages[i].ProviderOptions = providerOption
|
||||
}
|
||||
}
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
func extractContextLimit(metadata fantasy.ProviderMetadata) sql.NullInt64 {
|
||||
if len(metadata) == 0 {
|
||||
return sql.NullInt64{}
|
||||
}
|
||||
|
||||
encoded, err := json.Marshal(metadata)
|
||||
if err != nil || len(encoded) == 0 {
|
||||
return sql.NullInt64{}
|
||||
}
|
||||
|
||||
var payload any
|
||||
if err := json.Unmarshal(encoded, &payload); err != nil {
|
||||
return sql.NullInt64{}
|
||||
}
|
||||
|
||||
limit, ok := findContextLimitValue(payload)
|
||||
if !ok {
|
||||
return sql.NullInt64{}
|
||||
}
|
||||
|
||||
return sql.NullInt64{
|
||||
Int64: limit,
|
||||
Valid: true,
|
||||
}
|
||||
}
|
||||
|
||||
func findContextLimitValue(value any) (int64, bool) {
|
||||
var (
|
||||
limit int64
|
||||
found bool
|
||||
)
|
||||
|
||||
collectContextLimitValues(value, func(candidate int64) {
|
||||
if !found || candidate > limit {
|
||||
limit = candidate
|
||||
found = true
|
||||
}
|
||||
})
|
||||
|
||||
return limit, found
|
||||
}
|
||||
|
||||
func collectContextLimitValues(value any, onValue func(int64)) {
|
||||
switch typed := value.(type) {
|
||||
case map[string]any:
|
||||
for key, child := range typed {
|
||||
if isContextLimitKey(key) {
|
||||
if numeric, ok := numericContextLimitValue(child); ok {
|
||||
onValue(numeric)
|
||||
}
|
||||
}
|
||||
collectContextLimitValues(child, onValue)
|
||||
}
|
||||
case []any:
|
||||
for _, child := range typed {
|
||||
collectContextLimitValues(child, onValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isContextLimitKey(key string) bool {
|
||||
normalized := normalizeMetadataKey(key)
|
||||
if normalized == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
switch normalized {
|
||||
case
|
||||
"contextlimit",
|
||||
"contextwindow",
|
||||
"contextlength",
|
||||
"maxcontext",
|
||||
"maxcontexttokens",
|
||||
"maxinputtokens",
|
||||
"maxinputtoken",
|
||||
"inputtokenlimit":
|
||||
return true
|
||||
}
|
||||
|
||||
return strings.Contains(normalized, "context") &&
|
||||
(strings.Contains(normalized, "limit") ||
|
||||
strings.Contains(normalized, "window") ||
|
||||
strings.Contains(normalized, "length") ||
|
||||
strings.HasPrefix(normalized, "max"))
|
||||
}
|
||||
|
||||
// normalizeMetadataKey lowercases a key and strips every character outside
// ASCII [a-z0-9], so variants like "Max_Input-Tokens" and "maxInputTokens"
// normalize to the same string.
func normalizeMetadataKey(key string) string {
	var out strings.Builder
	out.Grow(len(key))
	for _, r := range key {
		switch {
		case (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9'):
			_, _ = out.WriteRune(r)
		case r >= 'A' && r <= 'Z':
			// Fold ASCII uppercase to lowercase.
			_, _ = out.WriteRune(r + ('a' - 'A'))
		}
	}
	return out.String()
}
|
||||
|
||||
func numericContextLimitValue(value any) (int64, bool) {
|
||||
switch typed := value.(type) {
|
||||
case int64:
|
||||
return positiveInt64(typed)
|
||||
case int32:
|
||||
return positiveInt64(int64(typed))
|
||||
case int:
|
||||
return positiveInt64(int64(typed))
|
||||
case float64:
|
||||
casted := int64(typed)
|
||||
if typed > 0 && float64(casted) == typed {
|
||||
return casted, true
|
||||
}
|
||||
case string:
|
||||
parsed, err := strconv.ParseInt(strings.TrimSpace(typed), 10, 64)
|
||||
if err == nil {
|
||||
return positiveInt64(parsed)
|
||||
}
|
||||
case json.Number:
|
||||
parsed, err := typed.Int64()
|
||||
if err == nil {
|
||||
return positiveInt64(parsed)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// positiveInt64 returns (value, true) when value is strictly positive and
// (0, false) otherwise.
func positiveInt64(value int64) (int64, bool) {
	if value > 0 {
		return value, true
	}
	return 0, false
}
|
||||
@@ -0,0 +1,289 @@
|
||||
package chatloop //nolint:testpackage // Uses internal symbols.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"iter"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"charm.land/fantasy"
|
||||
fantasyanthropic "charm.land/fantasy/providers/anthropic"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// activeToolName is the single tool that RunOptions.ActiveTools enables in
// the test below.
const activeToolName = "read_file"

// TestRun_ActiveToolsPrepareBehavior verifies three prepare-step behaviors
// in one streamed run against an Anthropic-named model:
//   - the internal sentinel user message is stripped from the prompt,
//   - ActiveTools filters the advertised tools down to read_file only,
//   - Anthropic ephemeral cache control is applied to the last system
//     message of the leading system run and to the final two messages,
// and that the persisted step falls back to ContextLimitFallback when the
// provider supplies no context-limit metadata.
func TestRun_ActiveToolsPrepareBehavior(t *testing.T) {
	t.Parallel()

	var capturedCall fantasy.Call
	model := &loopTestModel{
		provider: fantasyanthropic.Name,
		streamFn: func(_ context.Context, call fantasy.Call) (fantasy.StreamResponse, error) {
			// Capture the prepared call for the assertions below, then
			// emit a minimal one-text-block stream that finishes cleanly.
			capturedCall = call
			return streamFromParts([]fantasy.StreamPart{
				{Type: fantasy.StreamPartTypeTextStart, ID: "text-1"},
				{Type: fantasy.StreamPartTypeTextDelta, ID: "text-1", Delta: "done"},
				{Type: fantasy.StreamPartTypeTextEnd, ID: "text-1"},
				{Type: fantasy.StreamPartTypeFinish, FinishReason: fantasy.FinishReasonStop},
			}), nil
		},
	}

	persistStepCalls := 0
	var persistedStep PersistedStep

	_, err := Run(context.Background(), RunOptions{
		Model: model,
		// Two leading system messages followed by a user/assistant/user
		// tail; index positions matter for the cache-control assertions.
		Messages: []fantasy.Message{
			textMessage(fantasy.MessageRoleSystem, "sys-1"),
			textMessage(fantasy.MessageRoleSystem, "sys-2"),
			textMessage(fantasy.MessageRoleUser, "hello"),
			textMessage(fantasy.MessageRoleAssistant, "working"),
			textMessage(fantasy.MessageRoleUser, "continue"),
		},
		Tools: []fantasy.AgentTool{
			newNoopTool(activeToolName),
			newNoopTool("write_file"),
		},
		MaxSteps:             3,
		ActiveTools:          []string{activeToolName},
		ContextLimitFallback: 4096,
		PersistStep: func(_ context.Context, step PersistedStep) error {
			persistStepCalls++
			persistedStep = step
			return nil
		},
	})
	require.NoError(t, err)

	// The single step persists once, with the fallback context limit since
	// the fake stream carried no provider metadata.
	require.Equal(t, 1, persistStepCalls)
	require.True(t, persistedStep.ContextLimit.Valid)
	require.Equal(t, int64(4096), persistedStep.ContextLimit.Int64)

	// The sentinel user message must be gone and only the active tool
	// should be advertised to the model.
	require.NotEmpty(t, capturedCall.Prompt)
	require.False(t, containsPromptSentinel(capturedCall.Prompt))
	require.Len(t, capturedCall.Tools, 1)
	require.Equal(t, activeToolName, capturedCall.Tools[0].GetName())

	// Cache control: last system message (index 1) plus the final two
	// messages (indices 3 and 4); nothing else.
	require.Len(t, capturedCall.Prompt, 5)
	require.False(t, hasAnthropicEphemeralCacheControl(capturedCall.Prompt[0]))
	require.True(t, hasAnthropicEphemeralCacheControl(capturedCall.Prompt[1]))
	require.False(t, hasAnthropicEphemeralCacheControl(capturedCall.Prompt[2]))
	require.True(t, hasAnthropicEphemeralCacheControl(capturedCall.Prompt[3]))
	require.True(t, hasAnthropicEphemeralCacheControl(capturedCall.Prompt[4]))
}
|
||||
|
||||
// TestRun_InterruptedStepPersistsSyntheticToolResult interrupts a stream
// mid-step (after a tool-input delta and a partial text delta) and verifies
// that Run persists the in-flight draft: the partial assistant text, the
// partially streamed tool call, and a synthetic error tool result for the
// unanswered call. It also verifies persistence runs on an uncancelled
// context (persistCtx.Err() == nil) despite the run context being canceled.
func TestRun_InterruptedStepPersistsSyntheticToolResult(t *testing.T) {
	t.Parallel()

	started := make(chan struct{})
	model := &loopTestModel{
		provider: "fake",
		streamFn: func(ctx context.Context, _ fantasy.Call) (fantasy.StreamResponse, error) {
			return iter.Seq[fantasy.StreamPart](func(yield func(fantasy.StreamPart) bool) {
				// Emit a partial tool call (input never completed) and a
				// partial text block, then block until canceled.
				parts := []fantasy.StreamPart{
					{
						Type:         fantasy.StreamPartTypeToolInputStart,
						ID:           "interrupt-tool-1",
						ToolCallName: "read_file",
					},
					{
						Type:         fantasy.StreamPartTypeToolInputDelta,
						ID:           "interrupt-tool-1",
						ToolCallName: "read_file",
						Delta:        `{"path":"main.go"`,
					},
					{Type: fantasy.StreamPartTypeTextStart, ID: "text-1"},
					{Type: fantasy.StreamPartTypeTextDelta, ID: "text-1", Delta: "partial assistant output"},
				}
				for _, part := range parts {
					if !yield(part) {
						return
					}
				}

				// Signal the interrupter exactly once; guard against a
				// double-close if the stream is restarted.
				select {
				case <-started:
				default:
					close(started)
				}

				// Wait for cancellation, then surface it as a stream error.
				<-ctx.Done()
				_ = yield(fantasy.StreamPart{
					Type:  fantasy.StreamPartTypeError,
					Error: ctx.Err(),
				})
			}), nil
		},
	}

	ctx, cancel := context.WithCancelCause(context.Background())
	defer cancel(nil)

	// Cancel with ErrInterrupted as cause once the stream has emitted its
	// partial content, mimicking a user-initiated interrupt.
	go func() {
		<-started
		cancel(ErrInterrupted)
	}()

	// Seed with a sentinel error so we can tell whether PersistStep ran.
	persistedAssistantCtxErr := xerrors.New("unset")
	var persistedContent []fantasy.Content

	_, err := Run(ctx, RunOptions{
		Model: model,
		Messages: []fantasy.Message{
			textMessage(fantasy.MessageRoleUser, "hello"),
		},
		Tools: []fantasy.AgentTool{
			newNoopTool("read_file"),
		},
		MaxSteps: 3,
		PersistStep: func(persistCtx context.Context, step PersistedStep) error {
			persistedAssistantCtxErr = persistCtx.Err()
			persistedContent = append([]fantasy.Content(nil), step.Content...)
			return nil
		},
	})
	require.ErrorIs(t, err, ErrInterrupted)
	// Persistence must run on a context detached from the canceled one.
	require.NoError(t, persistedAssistantCtxErr)

	require.NotEmpty(t, persistedContent)
	var (
		foundText       bool
		foundToolCall   bool
		foundToolResult bool
	)
	for _, block := range persistedContent {
		if text, ok := fantasy.AsContentType[fantasy.TextContent](block); ok {
			if strings.Contains(text.Text, "partial assistant output") {
				foundText = true
			}
			continue
		}
		if toolCall, ok := fantasy.AsContentType[fantasy.ToolCallContent](block); ok {
			if toolCall.ToolCallID == "interrupt-tool-1" &&
				toolCall.ToolName == "read_file" &&
				strings.Contains(toolCall.Input, `"path":"main.go"`) {
				foundToolCall = true
			}
			continue
		}
		if toolResult, ok := fantasy.AsContentType[fantasy.ToolResultContent](block); ok {
			if toolResult.ToolCallID == "interrupt-tool-1" &&
				toolResult.ToolName == "read_file" {
				// The synthetic result for the unanswered call must be an
				// error result, not a success.
				_, isErr := toolResult.Result.(fantasy.ToolResultOutputContentError)
				require.True(t, isErr, "interrupted tool result should be an error")
				foundToolResult = true
			}
		}
	}
	require.True(t, foundText)
	require.True(t, foundToolCall)
	require.True(t, foundToolResult)
}
|
||||
|
||||
// loopTestModel is a fantasy.LanguageModel test double whose behavior is
// injected per test via generateFn and streamFn.
type loopTestModel struct {
	provider   string // reported by Provider(); defaults to "fake" when empty
	model      string // reported by Model(); defaults to "fake" when empty
	generateFn func(context.Context, fantasy.Call) (*fantasy.Response, error)
	streamFn   func(context.Context, fantasy.Call) (fantasy.StreamResponse, error)
}
|
||||
|
||||
func (m *loopTestModel) Provider() string {
|
||||
if m.provider != "" {
|
||||
return m.provider
|
||||
}
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (m *loopTestModel) Model() string {
|
||||
if m.model != "" {
|
||||
return m.model
|
||||
}
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (m *loopTestModel) Generate(ctx context.Context, call fantasy.Call) (*fantasy.Response, error) {
|
||||
if m.generateFn != nil {
|
||||
return m.generateFn(ctx, call)
|
||||
}
|
||||
return &fantasy.Response{}, nil
|
||||
}
|
||||
|
||||
func (m *loopTestModel) Stream(ctx context.Context, call fantasy.Call) (fantasy.StreamResponse, error) {
|
||||
if m.streamFn != nil {
|
||||
return m.streamFn(ctx, call)
|
||||
}
|
||||
return streamFromParts([]fantasy.StreamPart{{
|
||||
Type: fantasy.StreamPartTypeFinish,
|
||||
FinishReason: fantasy.FinishReasonStop,
|
||||
}}), nil
|
||||
}
|
||||
|
||||
// GenerateObject is not exercised by these tests and always errors.
func (*loopTestModel) GenerateObject(context.Context, fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
	return nil, xerrors.New("not implemented")
}

// StreamObject is not exercised by these tests and always errors.
func (*loopTestModel) StreamObject(context.Context, fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
	return nil, xerrors.New("not implemented")
}
|
||||
|
||||
func streamFromParts(parts []fantasy.StreamPart) fantasy.StreamResponse {
|
||||
return iter.Seq[fantasy.StreamPart](func(yield func(fantasy.StreamPart) bool) {
|
||||
for _, part := range parts {
|
||||
if !yield(part) {
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func newNoopTool(name string) fantasy.AgentTool {
|
||||
return fantasy.NewAgentTool(
|
||||
name,
|
||||
"test noop tool",
|
||||
func(context.Context, struct{}, fantasy.ToolCall) (fantasy.ToolResponse, error) {
|
||||
return fantasy.ToolResponse{}, nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func textMessage(role fantasy.MessageRole, text string) fantasy.Message {
|
||||
return fantasy.Message{
|
||||
Role: role,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: text},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func containsPromptSentinel(prompt []fantasy.Message) bool {
|
||||
for _, message := range prompt {
|
||||
if message.Role != fantasy.MessageRoleUser || len(message.Content) != 1 {
|
||||
continue
|
||||
}
|
||||
textPart, ok := fantasy.AsMessagePart[fantasy.TextPart](message.Content[0])
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(textPart.Text, "__chatd_agent_prompt_sentinel_") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func hasAnthropicEphemeralCacheControl(message fantasy.Message) bool {
|
||||
if len(message.ProviderOptions) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
options, ok := message.ProviderOptions[fantasyanthropic.Name]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
cacheOptions, ok := options.(*fantasyanthropic.ProviderCacheControlOptions)
|
||||
return ok && cacheOptions.CacheControl.Type == "ephemeral"
|
||||
}
|
||||
@@ -0,0 +1,214 @@
|
||||
package chatloop
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"charm.land/fantasy"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const (
	// defaultCompactionThresholdPercent is used when the configured
	// threshold falls outside [min, max]; compaction triggers once context
	// usage reaches this percentage of the context limit.
	defaultCompactionThresholdPercent = int32(70)
	minCompactionThresholdPercent     = int32(0)
	maxCompactionThresholdPercent     = int32(100)

	// defaultCompactionSummaryPrompt is the user-turn instruction appended
	// to the conversation when no custom summary prompt is configured.
	defaultCompactionSummaryPrompt = "Summarize the current chat so a " +
		"new assistant can continue seamlessly. Include the user's goals, " +
		"decisions made, concrete technical details (files, commands, APIs), " +
		"errors encountered and fixes, and open questions. Be dense and factual. " +
		"Omit pleasantries and next-step suggestions."
	// defaultCompactionSystemSummaryPrefix prefixes the generated summary
	// when it is stored as system context.
	defaultCompactionSystemSummaryPrefix = "Summary of earlier chat context:"
	// defaultCompactionTimeout bounds the summary-generation model call.
	defaultCompactionTimeout = 90 * time.Second
)
|
||||
|
||||
// CompactionOptions configures automatic conversation compaction performed
// after a run completes.
type CompactionOptions struct {
	// ThresholdPercent is the context-usage percentage at which compaction
	// triggers; values outside [0, 100] fall back to the default. A value
	// of 100 disables compaction.
	ThresholdPercent int32
	// ContextLimit overrides the context window size when provider
	// metadata does not supply one.
	ContextLimit int64
	// SummaryPrompt is the instruction used to generate the summary;
	// blank falls back to the default prompt.
	SummaryPrompt string
	// SystemSummaryPrefix prefixes the summary when stored as system
	// context; blank falls back to the default prefix.
	SystemSummaryPrefix string
	// Timeout bounds the summary-generation call; non-positive falls back
	// to the default timeout.
	Timeout time.Duration
	// Persist stores the compaction result. Required.
	Persist func(context.Context, CompactionResult) error
	// OnStart, if set, is invoked just before summary generation begins.
	OnStart func()
	// OnError, if set, receives compaction errors (compaction is
	// best-effort and does not fail the run).
	OnError func(error)
}
|
||||
|
||||
// CompactionResult carries the generated summary and the usage figures that
// triggered compaction, for the Persist callback to store.
type CompactionResult struct {
	SystemSummary    string  // prefix + summary, ready to inject as system context
	SummaryReport    string  // raw summary text as generated by the model
	ThresholdPercent int32   // effective threshold after defaulting
	UsagePercent     float64 // context usage that triggered compaction
	ContextTokens    int64   // tokens counted in the last usable step
	ContextLimit     int64   // effective context limit used for the ratio
}
|
||||
|
||||
// maybeCompact runs post-step conversation compaction when context usage has
// crossed the configured threshold. It normalizes the compaction config,
// decides whether compaction should run, measures context usage from the
// most recent step with usable token counts, generates a summary, and hands
// the result to the Persist callback. Returns nil when compaction is simply
// not warranted; returns an error only for misconfiguration or a failed
// summary generation/persist.
func maybeCompact(
	ctx context.Context,
	runOpts RunOptions,
	runResult *fantasy.AgentResult,
) error {
	if runResult == nil || runOpts.Compaction == nil {
		return nil
	}

	// Work on a copy so defaulting does not mutate the caller's options.
	config := *runOpts.Compaction
	if config.Persist == nil {
		return xerrors.New("compaction persist callback is required")
	}
	if strings.TrimSpace(config.SummaryPrompt) == "" {
		config.SummaryPrompt = defaultCompactionSummaryPrompt
	}
	if strings.TrimSpace(config.SystemSummaryPrefix) == "" {
		config.SystemSummaryPrefix = defaultCompactionSystemSummaryPrefix
	}
	if config.Timeout <= 0 {
		config.Timeout = defaultCompactionTimeout
	}
	if config.ThresholdPercent < minCompactionThresholdPercent ||
		config.ThresholdPercent > maxCompactionThresholdPercent {
		config.ThresholdPercent = defaultCompactionThresholdPercent
	}

	// A threshold of 100% effectively disables compaction.
	if config.ThresholdPercent >= maxCompactionThresholdPercent {
		return nil
	}
	// Skip when the run stopped at the step cap with pending tool calls:
	// the conversation is mid-flight and will be continued, not compacted.
	if runOpts.MaxSteps > 0 && len(runResult.Steps) >= runOpts.MaxSteps {
		lastStep := runResult.Steps[len(runResult.Steps)-1]
		if lastStep.FinishReason == fantasy.FinishReasonToolCalls &&
			len(lastStep.Content.ToolCalls()) > 0 {
			return nil
		}
	}

	// Walk steps newest-first and take the first one with usable token
	// counts: input + cache-read + cache-creation tokens, falling back to
	// TotalTokens when no per-category counts are present.
	contextTokens := int64(0)
	contextLimitFromMetadata := int64(0)
	for i := len(runResult.Steps) - 1; i >= 0; i-- {
		usage := runResult.Steps[i].Usage
		total := int64(0)
		hasContextTokens := false

		if usage.InputTokens > 0 {
			total += usage.InputTokens
			hasContextTokens = true
		}
		if usage.CacheReadTokens > 0 {
			total += usage.CacheReadTokens
			hasContextTokens = true
		}
		if usage.CacheCreationTokens > 0 {
			total += usage.CacheCreationTokens
			hasContextTokens = true
		}
		if !hasContextTokens && usage.TotalTokens > 0 {
			total = usage.TotalTokens
			hasContextTokens = true
		}
		if !hasContextTokens || total <= 0 {
			continue
		}

		contextTokens = total
		// Prefer a provider-reported context limit from the same step.
		metadataLimit := extractContextLimit(runResult.Steps[i].ProviderMetadata)
		if metadataLimit.Valid && metadataLimit.Int64 > 0 {
			contextLimitFromMetadata = metadataLimit.Int64
		}
		break
	}
	if contextTokens <= 0 {
		return nil
	}

	// Resolve the context limit: provider metadata, then the compaction
	// config, then the run-level fallback. Without any limit we cannot
	// compute a usage ratio, so do nothing.
	contextLimit := contextLimitFromMetadata
	if contextLimit <= 0 && config.ContextLimit > 0 {
		contextLimit = config.ContextLimit
	}
	if contextLimit <= 0 && runOpts.ContextLimitFallback > 0 {
		contextLimit = runOpts.ContextLimitFallback
	}
	if contextLimit <= 0 {
		return nil
	}

	usagePercent := (float64(contextTokens) / float64(contextLimit)) * 100
	if usagePercent < float64(config.ThresholdPercent) {
		return nil
	}

	if config.OnStart != nil {
		config.OnStart()
	}

	summary, err := generateCompactionSummary(
		ctx,
		runOpts.Model,
		runOpts.Messages,
		runResult.Steps,
		config,
	)
	if err != nil {
		return err
	}
	// An empty summary is treated as "nothing to compact", not an error.
	if summary == "" {
		return nil
	}

	systemSummary := strings.TrimSpace(
		config.SystemSummaryPrefix + "\n\n" + summary,
	)

	return config.Persist(ctx, CompactionResult{
		SystemSummary:    systemSummary,
		SummaryReport:    summary,
		ThresholdPercent: config.ThresholdPercent,
		UsagePercent:     usagePercent,
		ContextTokens:    contextTokens,
		ContextLimit:     contextLimit,
	})
}
|
||||
|
||||
func generateCompactionSummary(
|
||||
ctx context.Context,
|
||||
model fantasy.LanguageModel,
|
||||
messages []fantasy.Message,
|
||||
steps []fantasy.StepResult,
|
||||
options CompactionOptions,
|
||||
) (string, error) {
|
||||
summaryPrompt := make([]fantasy.Message, 0, len(messages)+len(steps)+1)
|
||||
summaryPrompt = append(summaryPrompt, messages...)
|
||||
for _, step := range steps {
|
||||
summaryPrompt = append(summaryPrompt, step.Messages...)
|
||||
}
|
||||
summaryPrompt = append(summaryPrompt, fantasy.Message{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: options.SummaryPrompt},
|
||||
},
|
||||
})
|
||||
toolChoice := fantasy.ToolChoiceNone
|
||||
|
||||
summaryCtx, cancel := context.WithTimeout(ctx, options.Timeout)
|
||||
defer cancel()
|
||||
|
||||
response, err := model.Generate(summaryCtx, fantasy.Call{
|
||||
Prompt: summaryPrompt,
|
||||
ToolChoice: &toolChoice,
|
||||
})
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("generate summary text: %w", err)
|
||||
}
|
||||
|
||||
parts := make([]string, 0, len(response.Content))
|
||||
for _, block := range response.Content {
|
||||
textBlock, ok := fantasy.AsContentType[fantasy.TextContent](block)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
text := strings.TrimSpace(textBlock.Text)
|
||||
if text == "" {
|
||||
continue
|
||||
}
|
||||
parts = append(parts, text)
|
||||
}
|
||||
return strings.TrimSpace(strings.Join(parts, " ")), nil
|
||||
}
|
||||
@@ -0,0 +1,239 @@
|
||||
package chatloop //nolint:testpackage // Uses internal symbols.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"charm.land/fantasy"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// TestRun_Compaction exercises the automatic context-compaction path of
// Run: when reported input-token usage crosses the configured threshold
// the loop generates a summary via model.Generate and hands the result
// to CompactionOptions.Persist.
func TestRun_Compaction(t *testing.T) {
	t.Parallel()

	// Crossing the threshold (80 of 100 tokens against a 70% threshold)
	// must trigger exactly one summary generation and one persist call
	// carrying the computed usage figures.
	t.Run("PersistsWhenThresholdReached", func(t *testing.T) {
		t.Parallel()

		persistCompactionCalls := 0
		var persistedCompaction CompactionResult
		const summaryText = "summary text for compaction"

		model := &loopTestModel{
			provider: "fake",
			// Stream a trivial "done" reply whose finish usage (80 input
			// tokens vs. the 100-token fallback limit) exceeds the
			// threshold and should trigger compaction.
			streamFn: func(_ context.Context, _ fantasy.Call) (fantasy.StreamResponse, error) {
				return streamFromParts([]fantasy.StreamPart{
					{Type: fantasy.StreamPartTypeTextStart, ID: "text-1"},
					{Type: fantasy.StreamPartTypeTextDelta, ID: "text-1", Delta: "done"},
					{Type: fantasy.StreamPartTypeTextEnd, ID: "text-1"},
					{
						Type:         fantasy.StreamPartTypeFinish,
						FinishReason: fantasy.FinishReasonStop,
						Usage: fantasy.Usage{
							InputTokens: 80,
							TotalTokens: 85,
						},
					},
				}), nil
			},
			// The summary request must end with the configured
			// SummaryPrompt as a single-part user message.
			generateFn: func(_ context.Context, call fantasy.Call) (*fantasy.Response, error) {
				require.NotEmpty(t, call.Prompt)
				lastPrompt := call.Prompt[len(call.Prompt)-1]
				require.Equal(t, fantasy.MessageRoleUser, lastPrompt.Role)
				require.Len(t, lastPrompt.Content, 1)

				instruction, ok := fantasy.AsMessagePart[fantasy.TextPart](lastPrompt.Content[0])
				require.True(t, ok)
				require.Equal(t, "summarize now", instruction.Text)

				return &fantasy.Response{
					Content: []fantasy.Content{
						fantasy.TextContent{Text: summaryText},
					},
				}, nil
			},
		}

		_, err := Run(context.Background(), RunOptions{
			Model: model,
			Messages: []fantasy.Message{
				textMessage(fantasy.MessageRoleUser, "hello"),
			},
			MaxSteps: 1,
			PersistStep: func(_ context.Context, _ PersistedStep) error {
				return nil
			},
			ContextLimitFallback: 100,
			Compaction: &CompactionOptions{
				ThresholdPercent: 70,
				SummaryPrompt:    "summarize now",
				Persist: func(_ context.Context, result CompactionResult) error {
					persistCompactionCalls++
					persistedCompaction = result
					return nil
				},
			},
		})
		require.NoError(t, err)
		require.Equal(t, 1, persistCompactionCalls)
		require.Contains(t, persistedCompaction.SystemSummary, summaryText)
		require.Equal(t, summaryText, persistedCompaction.SummaryReport)
		require.Equal(t, int64(80), persistedCompaction.ContextTokens)
		require.Equal(t, int64(100), persistedCompaction.ContextLimit)
		require.InDelta(t, 80.0, persistedCompaction.UsagePercent, 0.0001)
	})

	t.Run("OnStartFiresBeforePersist", func(t *testing.T) {
		t.Parallel()

		const summaryText = "compaction summary for ordering test"

		// Track the order of callbacks to verify OnStart fires
		// before the Generate call (summary generation) and
		// before Persist.
		var callOrder []string

		model := &loopTestModel{
			provider: "fake",
			streamFn: func(_ context.Context, _ fantasy.Call) (fantasy.StreamResponse, error) {
				return streamFromParts([]fantasy.StreamPart{
					{Type: fantasy.StreamPartTypeTextStart, ID: "text-1"},
					{Type: fantasy.StreamPartTypeTextDelta, ID: "text-1", Delta: "done"},
					{Type: fantasy.StreamPartTypeTextEnd, ID: "text-1"},
					{
						Type:         fantasy.StreamPartTypeFinish,
						FinishReason: fantasy.FinishReasonStop,
						Usage: fantasy.Usage{
							InputTokens: 80,
							TotalTokens: 85,
						},
					},
				}), nil
			},
			generateFn: func(_ context.Context, _ fantasy.Call) (*fantasy.Response, error) {
				callOrder = append(callOrder, "generate")
				return &fantasy.Response{
					Content: []fantasy.Content{
						fantasy.TextContent{Text: summaryText},
					},
				}, nil
			},
		}

		_, err := Run(context.Background(), RunOptions{
			Model: model,
			Messages: []fantasy.Message{
				textMessage(fantasy.MessageRoleUser, "hello"),
			},
			MaxSteps: 1,
			PersistStep: func(_ context.Context, _ PersistedStep) error {
				return nil
			},
			ContextLimitFallback: 100,
			Compaction: &CompactionOptions{
				ThresholdPercent: 70,
				SummaryPrompt:    "summarize now",
				OnStart: func() {
					callOrder = append(callOrder, "on_start")
				},
				Persist: func(_ context.Context, _ CompactionResult) error {
					callOrder = append(callOrder, "persist")
					return nil
				},
			},
		})
		require.NoError(t, err)
		require.Equal(t, []string{"on_start", "generate", "persist"}, callOrder)
	})

	// Below-threshold usage (10 of 100 tokens) must not start a
	// compaction at all.
	t.Run("OnStartNotCalledBelowThreshold", func(t *testing.T) {
		t.Parallel()

		onStartCalled := false

		model := &loopTestModel{
			provider: "fake",
			streamFn: func(_ context.Context, _ fantasy.Call) (fantasy.StreamResponse, error) {
				return streamFromParts([]fantasy.StreamPart{
					{
						Type:         fantasy.StreamPartTypeFinish,
						FinishReason: fantasy.FinishReasonStop,
						Usage: fantasy.Usage{
							InputTokens: 10,
						},
					},
				}), nil
			},
		}

		_, err := Run(context.Background(), RunOptions{
			Model: model,
			Messages: []fantasy.Message{
				textMessage(fantasy.MessageRoleUser, "hello"),
			},
			MaxSteps: 1,
			PersistStep: func(_ context.Context, _ PersistedStep) error {
				return nil
			},
			ContextLimitFallback: 100,
			Compaction: &CompactionOptions{
				ThresholdPercent: 70,
				OnStart: func() {
					onStartCalled = true
				},
				Persist: func(_ context.Context, _ CompactionResult) error {
					return nil
				},
			},
		})
		require.NoError(t, err)
		require.False(t, onStartCalled, "OnStart should not fire when usage is below threshold")
	})

	// A summary-generation failure is surfaced through OnError; the
	// Run itself still returns nil error.
	t.Run("ErrorsAreReported", func(t *testing.T) {
		t.Parallel()

		model := &loopTestModel{
			provider: "fake",
			streamFn: func(_ context.Context, _ fantasy.Call) (fantasy.StreamResponse, error) {
				return streamFromParts([]fantasy.StreamPart{
					{
						Type:         fantasy.StreamPartTypeFinish,
						FinishReason: fantasy.FinishReasonStop,
						Usage: fantasy.Usage{
							InputTokens: 80,
						},
					},
				}), nil
			},
			generateFn: func(_ context.Context, _ fantasy.Call) (*fantasy.Response, error) {
				return nil, xerrors.New("generate failed")
			},
		}

		compactionErr := xerrors.New("unset")
		_, err := Run(context.Background(), RunOptions{
			Model: model,
			Messages: []fantasy.Message{
				textMessage(fantasy.MessageRoleUser, "hello"),
			},
			MaxSteps: 1,
			PersistStep: func(_ context.Context, _ PersistedStep) error {
				return nil
			},
			ContextLimitFallback: 100,
			Compaction: &CompactionOptions{
				ThresholdPercent: 70,
				Persist: func(_ context.Context, _ CompactionResult) error {
					return nil
				},
				OnError: func(err error) {
					compactionErr = err
				},
			},
		})
		require.NoError(t, err)
		require.Error(t, compactionErr)
		require.ErrorContains(t, compactionErr, "generate summary text")
	})
}
|
||||
@@ -0,0 +1,982 @@
|
||||
package chatprompt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"charm.land/fantasy"
|
||||
fantasyopenai "charm.land/fantasy/providers/openai"
|
||||
"github.com/sqlc-dev/pqtype"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// toolCallIDSanitizer matches every character outside [a-zA-Z0-9_-],
// i.e. anything not safe to keep in a provider tool call ID.
// Presumably consumed by sanitizeToolCallID (not visible here) — confirm.
var toolCallIDSanitizer = regexp.MustCompile(`[^a-zA-Z0-9_-]`)
|
||||
|
||||
// ConvertMessages converts persisted chat messages into a fantasy
// prompt.
//
// Messages whose visibility excludes the model are skipped (an empty
// visibility is treated as "both"). While converting, it records a
// sanitized tool-call-ID -> tool-name index from both assistant tool
// calls and tool result rows, then runs two repair passes:
// injectMissingToolResults (synthesizes results for unanswered calls)
// and injectMissingToolUses (synthesizes calls for orphaned results).
// An unrecognized role is an error.
func ConvertMessages(
	messages []database.ChatMessage,
) ([]fantasy.Message, error) {
	prompt := make([]fantasy.Message, 0, len(messages))
	// Maps sanitized tool call IDs to tool names, used later to
	// reconstruct missing tool-use messages.
	toolNameByCallID := make(map[string]string)
	for _, message := range messages {
		visibility := message.Visibility
		if visibility == "" {
			visibility = database.ChatMessageVisibilityBoth
		}
		if visibility != database.ChatMessageVisibilityModel &&
			visibility != database.ChatMessageVisibilityBoth {
			continue
		}

		switch message.Role {
		case string(fantasy.MessageRoleSystem):
			content, err := parseSystemContent(message.Content)
			if err != nil {
				return nil, err
			}
			// Blank system messages carry no instruction; drop them.
			if strings.TrimSpace(content) == "" {
				continue
			}
			prompt = append(prompt, fantasy.Message{
				Role: fantasy.MessageRoleSystem,
				Content: []fantasy.MessagePart{
					fantasy.TextPart{Text: content},
				},
			})
		case string(fantasy.MessageRoleUser):
			content, err := ParseContent(string(fantasy.MessageRoleUser), message.Content)
			if err != nil {
				return nil, err
			}
			prompt = append(prompt, fantasy.Message{
				Role:    fantasy.MessageRoleUser,
				Content: ToMessageParts(content),
			})
		case string(fantasy.MessageRoleAssistant):
			content, err := ParseContent(string(fantasy.MessageRoleAssistant), message.Content)
			if err != nil {
				return nil, err
			}
			// Normalize tool-call inputs to valid JSON objects and
			// index the call IDs for the repair passes below.
			parts := normalizeAssistantToolCallInputs(ToMessageParts(content))
			for _, toolCall := range ExtractToolCalls(parts) {
				if toolCall.ToolCallID == "" || strings.TrimSpace(toolCall.ToolName) == "" {
					continue
				}
				toolNameByCallID[sanitizeToolCallID(toolCall.ToolCallID)] = toolCall.ToolName
			}
			prompt = append(prompt, fantasy.Message{
				Role:    fantasy.MessageRoleAssistant,
				Content: parts,
			})
		case string(fantasy.MessageRoleTool):
			rows, err := parseToolResultRows(message.Content)
			if err != nil {
				return nil, err
			}
			parts := make([]fantasy.MessagePart, 0, len(rows))
			for _, row := range rows {
				if row.ToolCallID != "" && row.ToolName != "" {
					toolNameByCallID[sanitizeToolCallID(row.ToolCallID)] = row.ToolName
				}
				parts = append(parts, row.toToolResultPart())
			}
			prompt = append(prompt, fantasy.Message{
				Role:    fantasy.MessageRoleTool,
				Content: parts,
			})
		default:
			return nil, xerrors.Errorf("unsupported chat message role %q", message.Role)
		}
	}
	// Repair passes: providers reject prompts where tool calls and
	// tool results are not correctly paired.
	prompt = injectMissingToolResults(prompt)
	prompt = injectMissingToolUses(
		prompt,
		toolNameByCallID,
	)
	return prompt, nil
}
|
||||
|
||||
// PrependSystem prepends a system message unless an existing system
|
||||
// message already mentions create_workspace guidance.
|
||||
func PrependSystem(prompt []fantasy.Message, instruction string) []fantasy.Message {
|
||||
instruction = strings.TrimSpace(instruction)
|
||||
if instruction == "" {
|
||||
return prompt
|
||||
}
|
||||
for _, message := range prompt {
|
||||
if message.Role != fantasy.MessageRoleSystem {
|
||||
continue
|
||||
}
|
||||
for _, part := range message.Content {
|
||||
textPart, ok := fantasy.AsMessagePart[fantasy.TextPart](part)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(textPart.Text), "create_workspace") {
|
||||
return prompt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out := make([]fantasy.Message, 0, len(prompt)+1)
|
||||
out = append(out, fantasy.Message{
|
||||
Role: fantasy.MessageRoleSystem,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: instruction},
|
||||
},
|
||||
})
|
||||
out = append(out, prompt...)
|
||||
return out
|
||||
}
|
||||
|
||||
// InsertSystem inserts a system message after the existing system
|
||||
// block and before the first non-system message.
|
||||
func InsertSystem(prompt []fantasy.Message, instruction string) []fantasy.Message {
|
||||
instruction = strings.TrimSpace(instruction)
|
||||
if instruction == "" {
|
||||
return prompt
|
||||
}
|
||||
|
||||
systemMessage := fantasy.Message{
|
||||
Role: fantasy.MessageRoleSystem,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: instruction},
|
||||
},
|
||||
}
|
||||
|
||||
out := make([]fantasy.Message, 0, len(prompt)+1)
|
||||
inserted := false
|
||||
for _, message := range prompt {
|
||||
if !inserted && message.Role != fantasy.MessageRoleSystem {
|
||||
out = append(out, systemMessage)
|
||||
inserted = true
|
||||
}
|
||||
out = append(out, message)
|
||||
}
|
||||
if !inserted {
|
||||
out = append(out, systemMessage)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AppendUser appends an instruction as a user message at the end of
|
||||
// the prompt.
|
||||
func AppendUser(prompt []fantasy.Message, instruction string) []fantasy.Message {
|
||||
instruction = strings.TrimSpace(instruction)
|
||||
if instruction == "" {
|
||||
return prompt
|
||||
}
|
||||
out := make([]fantasy.Message, 0, len(prompt)+1)
|
||||
out = append(out, prompt...)
|
||||
out = append(out, fantasy.Message{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: instruction},
|
||||
},
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
// ParseContent decodes persisted chat message content blocks.
//
// Two historical payload shapes are accepted: a bare JSON string
// (wrapped into a single TextContent block) and a JSON array of typed
// content blocks decoded via fantasy.UnmarshalContent. An invalid or
// empty raw message yields (nil, nil).
func ParseContent(role string, raw pqtype.NullRawMessage) ([]fantasy.Content, error) {
	if !raw.Valid || len(raw.RawMessage) == 0 {
		return nil, nil
	}

	// Legacy shape: the whole payload is one JSON string.
	var text string
	if err := json.Unmarshal(raw.RawMessage, &text); err == nil {
		return []fantasy.Content{fantasy.TextContent{Text: text}}, nil
	}

	// Current shape: an array of typed content blocks.
	var rawBlocks []json.RawMessage
	if err := json.Unmarshal(raw.RawMessage, &rawBlocks); err != nil {
		return nil, xerrors.Errorf("parse %s content: %w", role, err)
	}

	content := make([]fantasy.Content, 0, len(rawBlocks))
	for i, rawBlock := range rawBlocks {
		block, err := fantasy.UnmarshalContent(rawBlock)
		if err != nil {
			return nil, xerrors.Errorf("parse %s content block %d: %w", role, i, err)
		}
		content = append(content, block)
	}
	return content, nil
}
|
||||
|
||||
// toolResultRaw is an untyped representation of a persisted tool
// result row. We intentionally avoid a strict Go struct so that
// historical shapes are never rejected.
type toolResultRaw struct {
	// ToolCallID links the result back to the originating tool call.
	ToolCallID string `json:"tool_call_id"`
	// ToolName names the tool that produced this result.
	ToolName string `json:"tool_name"`
	// Result is kept as raw JSON so any historical payload shape
	// round-trips unchanged.
	Result json.RawMessage `json:"result"`
	// IsError marks the row as a tool failure.
	IsError bool `json:"is_error,omitempty"`
}
|
||||
|
||||
// parseToolResultRows decodes persisted tool result rows.
|
||||
func parseToolResultRows(raw pqtype.NullRawMessage) ([]toolResultRaw, error) {
|
||||
if !raw.Valid || len(raw.RawMessage) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var rows []toolResultRaw
|
||||
if err := json.Unmarshal(raw.RawMessage, &rows); err != nil {
|
||||
return nil, xerrors.Errorf("parse tool content: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
// toToolResultPart converts a persisted tool result row into a fantasy
// tool result message part. Empty or JSON-null results are normalized
// to "{}" so the model always receives a JSON object. For error rows,
// the payload's "error" string field (when present and non-empty) is
// preferred as the error message, falling back to the whole trimmed
// payload.
func (r toolResultRaw) toToolResultPart() fantasy.ToolResultPart {
	toolCallID := sanitizeToolCallID(r.ToolCallID)
	resultText := string(r.Result)
	if resultText == "" || resultText == "null" {
		resultText = "{}"
	}

	if r.IsError {
		message := strings.TrimSpace(resultText)
		if extracted := extractErrorString(r.Result); extracted != "" {
			message = extracted
		}
		return fantasy.ToolResultPart{
			ToolCallID: toolCallID,
			Output: fantasy.ToolResultOutputContentError{
				Error: xerrors.New(message),
			},
		}
	}

	return fantasy.ToolResultPart{
		ToolCallID: toolCallID,
		Output: fantasy.ToolResultOutputContentText{
			Text: resultText,
		},
	}
}
|
||||
|
||||
// extractErrorString pulls the "error" field from a JSON object if
|
||||
// present, returning it as a string. Returns "" if the field is
|
||||
// missing or the input is not an object.
|
||||
func extractErrorString(raw json.RawMessage) string {
|
||||
var fields map[string]json.RawMessage
|
||||
if err := json.Unmarshal(raw, &fields); err != nil {
|
||||
return ""
|
||||
}
|
||||
errField, ok := fields["error"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
var s string
|
||||
if err := json.Unmarshal(errField, &s); err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// ToMessageParts converts fantasy content blocks into message parts.
|
||||
func ToMessageParts(content []fantasy.Content) []fantasy.MessagePart {
|
||||
parts := make([]fantasy.MessagePart, 0, len(content))
|
||||
for _, block := range content {
|
||||
switch value := block.(type) {
|
||||
case fantasy.TextContent:
|
||||
parts = append(parts, fantasy.TextPart{
|
||||
Text: value.Text,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case *fantasy.TextContent:
|
||||
parts = append(parts, fantasy.TextPart{
|
||||
Text: value.Text,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case fantasy.ReasoningContent:
|
||||
parts = append(parts, fantasy.ReasoningPart{
|
||||
Text: value.Text,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case *fantasy.ReasoningContent:
|
||||
parts = append(parts, fantasy.ReasoningPart{
|
||||
Text: value.Text,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case fantasy.ToolCallContent:
|
||||
parts = append(parts, fantasy.ToolCallPart{
|
||||
ToolCallID: sanitizeToolCallID(value.ToolCallID),
|
||||
ToolName: value.ToolName,
|
||||
Input: value.Input,
|
||||
ProviderExecuted: value.ProviderExecuted,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case *fantasy.ToolCallContent:
|
||||
parts = append(parts, fantasy.ToolCallPart{
|
||||
ToolCallID: sanitizeToolCallID(value.ToolCallID),
|
||||
ToolName: value.ToolName,
|
||||
Input: value.Input,
|
||||
ProviderExecuted: value.ProviderExecuted,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case fantasy.FileContent:
|
||||
parts = append(parts, fantasy.FilePart{
|
||||
Data: value.Data,
|
||||
MediaType: value.MediaType,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case *fantasy.FileContent:
|
||||
parts = append(parts, fantasy.FilePart{
|
||||
Data: value.Data,
|
||||
MediaType: value.MediaType,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case fantasy.ToolResultContent:
|
||||
parts = append(parts, fantasy.ToolResultPart{
|
||||
ToolCallID: sanitizeToolCallID(value.ToolCallID),
|
||||
Output: value.Result,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
case *fantasy.ToolResultContent:
|
||||
parts = append(parts, fantasy.ToolResultPart{
|
||||
ToolCallID: sanitizeToolCallID(value.ToolCallID),
|
||||
Output: value.Result,
|
||||
ProviderOptions: fantasy.ProviderOptions(value.ProviderMetadata),
|
||||
})
|
||||
}
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
func normalizeAssistantToolCallInputs(
|
||||
parts []fantasy.MessagePart,
|
||||
) []fantasy.MessagePart {
|
||||
normalized := make([]fantasy.MessagePart, 0, len(parts))
|
||||
for _, part := range parts {
|
||||
toolCall, ok := fantasy.AsMessagePart[fantasy.ToolCallPart](part)
|
||||
if !ok {
|
||||
normalized = append(normalized, part)
|
||||
continue
|
||||
}
|
||||
|
||||
toolCall.Input = normalizeToolCallInput(toolCall.Input)
|
||||
normalized = append(normalized, toolCall)
|
||||
}
|
||||
return normalized
|
||||
}
|
||||
|
||||
// normalizeToolCallInput guarantees tool call input is a JSON object
// string. Anthropic drops assistant tool calls with malformed input,
// which can leave following tool results orphaned. Anything that is
// not a JSON object (empty, null, arrays, scalars, invalid JSON)
// collapses to "{}"; valid objects are returned trimmed.
func normalizeToolCallInput(input string) string {
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return "{}"
	}

	var object map[string]any
	err := json.Unmarshal([]byte(trimmed), &object)
	if err != nil || object == nil {
		return "{}"
	}
	return trimmed
}
|
||||
|
||||
// ExtractToolCalls returns all tool call parts as content blocks.
|
||||
func ExtractToolCalls(parts []fantasy.MessagePart) []fantasy.ToolCallContent {
|
||||
toolCalls := make([]fantasy.ToolCallContent, 0, len(parts))
|
||||
for _, part := range parts {
|
||||
toolCall, ok := fantasy.AsMessagePart[fantasy.ToolCallPart](part)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
toolCalls = append(toolCalls, fantasy.ToolCallContent{
|
||||
ToolCallID: toolCall.ToolCallID,
|
||||
ToolName: toolCall.ToolName,
|
||||
Input: toolCall.Input,
|
||||
ProviderExecuted: toolCall.ProviderExecuted,
|
||||
})
|
||||
}
|
||||
return toolCalls
|
||||
}
|
||||
|
||||
// MarshalContent encodes message content blocks for persistence.
//
// Each block is encoded individually via marshalContentBlock and the
// results are stored as one JSON array. An empty input yields the zero
// NullRawMessage (SQL NULL) rather than "[]".
func MarshalContent(blocks []fantasy.Content) (pqtype.NullRawMessage, error) {
	if len(blocks) == 0 {
		return pqtype.NullRawMessage{}, nil
	}

	encodedBlocks := make([]json.RawMessage, 0, len(blocks))
	for i, block := range blocks {
		encoded, err := marshalContentBlock(block)
		if err != nil {
			return pqtype.NullRawMessage{}, xerrors.Errorf(
				"encode content block %d: %w",
				i,
				err,
			)
		}
		encodedBlocks = append(encodedBlocks, encoded)
	}

	data, err := json.Marshal(encodedBlocks)
	if err != nil {
		return pqtype.NullRawMessage{}, xerrors.Errorf("encode content blocks: %w", err)
	}
	return pqtype.NullRawMessage{RawMessage: data, Valid: true}, nil
}
|
||||
|
||||
// MarshalToolResult encodes a single tool result for persistence as
|
||||
// an opaque JSON blob. The stored shape is
|
||||
// [{"tool_call_id":…,"tool_name":…,"result":…,"is_error":…}].
|
||||
func MarshalToolResult(toolCallID, toolName string, result json.RawMessage, isError bool) (pqtype.NullRawMessage, error) {
|
||||
row := toolResultRaw{
|
||||
ToolCallID: toolCallID,
|
||||
ToolName: toolName,
|
||||
Result: result,
|
||||
IsError: isError,
|
||||
}
|
||||
data, err := json.Marshal([]toolResultRaw{row})
|
||||
if err != nil {
|
||||
return pqtype.NullRawMessage{}, xerrors.Errorf("encode tool result: %w", err)
|
||||
}
|
||||
return pqtype.NullRawMessage{RawMessage: data, Valid: true}, nil
|
||||
}
|
||||
|
||||
// MarshalToolResultContent encodes a fantasy tool result content
// block for persistence. It extracts the raw fields and delegates
// to MarshalToolResult.
//
// Encoding per output kind: errors become {"error": <message>}; text
// is stored verbatim when already valid JSON, otherwise wrapped as
// {"output": <text>}; media becomes a data/mime_type/text object; any
// other kind falls back to "{}".
// NOTE(review): this switch duplicates toolResultContentToPart's
// encoding — keep the two in sync.
func MarshalToolResultContent(content fantasy.ToolResultContent) (pqtype.NullRawMessage, error) {
	var result json.RawMessage
	var isError bool

	switch output := content.Result.(type) {
	case fantasy.ToolResultOutputContentError:
		isError = true
		if output.Error != nil {
			// Marshal of a map[string]any with string values cannot
			// fail; the error is deliberately ignored.
			result, _ = json.Marshal(map[string]any{"error": output.Error.Error()})
		} else {
			result = []byte(`{"error":""}`)
		}
	case fantasy.ToolResultOutputContentText:
		result = json.RawMessage(output.Text)
		// Ensure valid JSON; wrap in an object if not.
		if !json.Valid(result) {
			result, _ = json.Marshal(map[string]any{"output": output.Text})
		}
	case fantasy.ToolResultOutputContentMedia:
		result, _ = json.Marshal(map[string]any{
			"data":      output.Data,
			"mime_type": output.MediaType,
			"text":      output.Text,
		})
	default:
		result = []byte(`{}`)
	}

	return MarshalToolResult(content.ToolCallID, content.ToolName, result, isError)
}
|
||||
|
||||
// PartFromContent converts fantasy content into a SDK chat message part.
//
// Both value and pointer forms of each content type are handled; any
// unrecognized content type maps to the zero ChatMessagePart. Tool
// call IDs are passed through unsanitized here (unlike ToMessageParts).
func PartFromContent(block fantasy.Content) codersdk.ChatMessagePart {
	switch value := block.(type) {
	case fantasy.TextContent:
		return codersdk.ChatMessagePart{
			Type: codersdk.ChatMessagePartTypeText,
			Text: value.Text,
		}
	case *fantasy.TextContent:
		return codersdk.ChatMessagePart{
			Type: codersdk.ChatMessagePartTypeText,
			Text: value.Text,
		}
	case fantasy.ReasoningContent:
		return codersdk.ChatMessagePart{
			Type:  codersdk.ChatMessagePartTypeReasoning,
			Text:  value.Text,
			Title: reasoningSummaryTitle(value.ProviderMetadata),
		}
	case *fantasy.ReasoningContent:
		return codersdk.ChatMessagePart{
			Type:  codersdk.ChatMessagePartTypeReasoning,
			Text:  value.Text,
			Title: reasoningSummaryTitle(value.ProviderMetadata),
		}
	case fantasy.ToolCallContent:
		return codersdk.ChatMessagePart{
			Type:       codersdk.ChatMessagePartTypeToolCall,
			ToolCallID: value.ToolCallID,
			ToolName:   value.ToolName,
			Args:       []byte(value.Input),
		}
	case *fantasy.ToolCallContent:
		return codersdk.ChatMessagePart{
			Type:       codersdk.ChatMessagePartTypeToolCall,
			ToolCallID: value.ToolCallID,
			ToolName:   value.ToolName,
			Args:       []byte(value.Input),
		}
	case fantasy.SourceContent:
		return codersdk.ChatMessagePart{
			Type:     codersdk.ChatMessagePartTypeSource,
			SourceID: value.ID,
			URL:      value.URL,
			Title:    value.Title,
		}
	case *fantasy.SourceContent:
		return codersdk.ChatMessagePart{
			Type:     codersdk.ChatMessagePartTypeSource,
			SourceID: value.ID,
			URL:      value.URL,
			Title:    value.Title,
		}
	case fantasy.FileContent:
		return codersdk.ChatMessagePart{
			Type:      codersdk.ChatMessagePartTypeFile,
			MediaType: value.MediaType,
			Data:      value.Data,
		}
	case *fantasy.FileContent:
		return codersdk.ChatMessagePart{
			Type:      codersdk.ChatMessagePartTypeFile,
			MediaType: value.MediaType,
			Data:      value.Data,
		}
	case fantasy.ToolResultContent:
		return toolResultContentToPart(value)
	case *fantasy.ToolResultContent:
		return toolResultContentToPart(*value)
	default:
		return codersdk.ChatMessagePart{}
	}
}
|
||||
|
||||
// ToolResultToPart converts a tool call ID, raw result, and error
// flag into a ChatMessagePart. This is the minimal conversion used
// both during streaming and when reading from the database.
//
// The result is passed through verbatim — callers are responsible for
// ensuring it is valid JSON (see toolResultContentToPart).
func ToolResultToPart(toolCallID, toolName string, result json.RawMessage, isError bool) codersdk.ChatMessagePart {
	return codersdk.ChatMessagePart{
		Type:       codersdk.ChatMessagePartTypeToolResult,
		ToolCallID: toolCallID,
		ToolName:   toolName,
		Result:     result,
		IsError:    isError,
	}
}
|
||||
|
||||
// toolResultContentToPart converts a fantasy ToolResultContent
// directly into a ChatMessagePart without an intermediate struct.
//
// Encoding per output kind: errors become {"error": <message>}; text
// is kept verbatim when already valid JSON, otherwise wrapped as
// {"output": <text>}; media becomes a data/mime_type/text object; any
// other kind falls back to "{}".
// NOTE(review): this switch duplicates MarshalToolResultContent's
// encoding — keep the two in sync.
func toolResultContentToPart(content fantasy.ToolResultContent) codersdk.ChatMessagePart {
	var result json.RawMessage
	var isError bool

	switch output := content.Result.(type) {
	case fantasy.ToolResultOutputContentError:
		isError = true
		if output.Error != nil {
			// Marshal of a map[string]any with string values cannot
			// fail; the error is deliberately ignored.
			result, _ = json.Marshal(map[string]any{"error": output.Error.Error()})
		} else {
			result = []byte(`{"error":""}`)
		}
	case fantasy.ToolResultOutputContentText:
		result = json.RawMessage(output.Text)
		// Ensure valid JSON; wrap in an object if not.
		if !json.Valid(result) {
			result, _ = json.Marshal(map[string]any{"output": output.Text})
		}
	case fantasy.ToolResultOutputContentMedia:
		result, _ = json.Marshal(map[string]any{
			"data":      output.Data,
			"mime_type": output.MediaType,
			"text":      output.Text,
		})
	default:
		result = []byte(`{}`)
	}

	return ToolResultToPart(content.ToolCallID, content.ToolName, result, isError)
}
|
||||
|
||||
// ReasoningTitleFromFirstLine extracts a compact markdown title.
|
||||
func ReasoningTitleFromFirstLine(text string) string {
|
||||
text = strings.TrimSpace(text)
|
||||
if text == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
firstLine := text
|
||||
if idx := strings.IndexAny(firstLine, "\r\n"); idx >= 0 {
|
||||
firstLine = firstLine[:idx]
|
||||
}
|
||||
firstLine = strings.TrimSpace(firstLine)
|
||||
if firstLine == "" || !strings.HasPrefix(firstLine, "**") {
|
||||
return ""
|
||||
}
|
||||
|
||||
rest := firstLine[2:]
|
||||
end := strings.Index(rest, "**")
|
||||
if end < 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
title := strings.TrimSpace(rest[:end])
|
||||
if title == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Require the first line to be exactly "**title**" (ignoring
|
||||
// surrounding whitespace) so providers without this format don't
|
||||
// accidentally emit a title.
|
||||
if strings.TrimSpace(rest[end+2:]) != "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
return compactReasoningSummaryTitle(title)
|
||||
}
|
||||
|
||||
// injectMissingToolResults ensures every assistant tool call is
// followed by a tool result. For each assistant message with tool
// calls, it collects the IDs answered by the immediately following
// contiguous tool messages, preserves those messages in order, and
// appends one synthetic tool message containing an error result for
// each unanswered call (e.g. calls interrupted before completion).
// Matching here uses raw (unsanitized) tool call IDs on both sides.
func injectMissingToolResults(prompt []fantasy.Message) []fantasy.Message {
	result := make([]fantasy.Message, 0, len(prompt))
	for i := 0; i < len(prompt); i++ {
		msg := prompt[i]
		result = append(result, msg)

		if msg.Role != fantasy.MessageRoleAssistant {
			continue
		}
		toolCalls := ExtractToolCalls(msg.Content)
		if len(toolCalls) == 0 {
			continue
		}

		// Collect the tool call IDs that have results in the
		// following tool message(s).
		answered := make(map[string]struct{})
		j := i + 1
		for ; j < len(prompt); j++ {
			if prompt[j].Role != fantasy.MessageRoleTool {
				break
			}
			for _, part := range prompt[j].Content {
				tr, ok := fantasy.AsMessagePart[fantasy.ToolResultPart](part)
				if !ok {
					continue
				}
				answered[tr.ToolCallID] = struct{}{}
			}
		}
		if i+1 < j {
			// Preserve persisted tool result ordering and inject any
			// synthetic results after the existing contiguous tool messages.
			result = append(result, prompt[i+1:j]...)
			// Skip past the tool messages we just copied.
			i = j - 1
		}

		// Build synthetic results for any unanswered tool calls.
		var missing []fantasy.MessagePart
		for _, tc := range toolCalls {
			if _, ok := answered[tc.ToolCallID]; !ok {
				missing = append(missing, fantasy.ToolResultPart{
					ToolCallID: tc.ToolCallID,
					Output: fantasy.ToolResultOutputContentError{
						Error: xerrors.New("tool call was interrupted and did not receive a result"),
					},
				})
			}
		}
		if len(missing) > 0 {
			result = append(result, fantasy.Message{
				Role:    fantasy.MessageRoleTool,
				Content: missing,
			})
		}
	}
	return result
}
|
||||
|
||||
// injectMissingToolUses repairs tool messages whose results have no
// matching tool call in the nearest preceding assistant message.
// Orphaned results get a synthetic assistant tool-use message (built
// by syntheticToolUseMessage from toolNameByCallID) inserted directly
// before them, with any still-matching results split out into their
// own tool message first. Matching uses sanitized tool call IDs on
// both sides.
func injectMissingToolUses(
	prompt []fantasy.Message,
	toolNameByCallID map[string]string,
) []fantasy.Message {
	result := make([]fantasy.Message, 0, len(prompt))
	for _, msg := range prompt {
		if msg.Role != fantasy.MessageRoleTool {
			result = append(result, msg)
			continue
		}

		// Gather the tool result parts of this tool message.
		toolResults := make([]fantasy.ToolResultPart, 0, len(msg.Content))
		for _, part := range msg.Content {
			toolResult, ok := fantasy.AsMessagePart[fantasy.ToolResultPart](part)
			if !ok {
				continue
			}
			toolResults = append(toolResults, toolResult)
		}
		if len(toolResults) == 0 {
			result = append(result, msg)
			continue
		}

		// Walk backwards through the result to find the nearest
		// preceding assistant message (skipping over other tool
		// messages that belong to the same batch of results).
		answeredByPrevious := make(map[string]struct{})
		for k := len(result) - 1; k >= 0; k-- {
			if result[k].Role == fantasy.MessageRoleAssistant {
				for _, toolCall := range ExtractToolCalls(result[k].Content) {
					toolCallID := sanitizeToolCallID(toolCall.ToolCallID)
					if toolCallID == "" {
						continue
					}
					answeredByPrevious[toolCallID] = struct{}{}
				}
				break
			}
			if result[k].Role != fantasy.MessageRoleTool {
				break
			}
		}

		// Split results into those matched by the preceding assistant's
		// calls and those orphaned.
		matchingResults := make([]fantasy.ToolResultPart, 0, len(toolResults))
		orphanResults := make([]fantasy.ToolResultPart, 0, len(toolResults))
		for _, toolResult := range toolResults {
			toolCallID := sanitizeToolCallID(toolResult.ToolCallID)
			if _, ok := answeredByPrevious[toolCallID]; ok {
				matchingResults = append(matchingResults, toolResult)
				continue
			}
			orphanResults = append(orphanResults, toolResult)
		}

		if len(orphanResults) == 0 {
			result = append(result, msg)
			continue
		}

		syntheticToolUse := syntheticToolUseMessage(
			orphanResults,
			toolNameByCallID,
		)
		// If no synthetic tool use could be built, keep the original
		// message unchanged rather than dropping results.
		if len(syntheticToolUse.Content) == 0 {
			result = append(result, msg)
			continue
		}

		if len(matchingResults) > 0 {
			result = append(result, toolMessageFromToolResultParts(matchingResults))
		}
		result = append(result, syntheticToolUse)
		result = append(result, toolMessageFromToolResultParts(orphanResults))
	}

	return result
}
|
||||
|
||||
func toolMessageFromToolResultParts(results []fantasy.ToolResultPart) fantasy.Message {
|
||||
parts := make([]fantasy.MessagePart, 0, len(results))
|
||||
for _, result := range results {
|
||||
parts = append(parts, result)
|
||||
}
|
||||
return fantasy.Message{
|
||||
Role: fantasy.MessageRoleTool,
|
||||
Content: parts,
|
||||
}
|
||||
}
|
||||
|
||||
func syntheticToolUseMessage(
|
||||
toolResults []fantasy.ToolResultPart,
|
||||
toolNameByCallID map[string]string,
|
||||
) fantasy.Message {
|
||||
parts := make([]fantasy.MessagePart, 0, len(toolResults))
|
||||
seen := make(map[string]struct{}, len(toolResults))
|
||||
|
||||
for _, toolResult := range toolResults {
|
||||
toolCallID := sanitizeToolCallID(toolResult.ToolCallID)
|
||||
if toolCallID == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[toolCallID]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
toolName := strings.TrimSpace(toolNameByCallID[toolCallID])
|
||||
if toolName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
seen[toolCallID] = struct{}{}
|
||||
parts = append(parts, fantasy.ToolCallPart{
|
||||
ToolCallID: toolCallID,
|
||||
ToolName: toolName,
|
||||
Input: "{}",
|
||||
})
|
||||
}
|
||||
|
||||
return fantasy.Message{
|
||||
Role: fantasy.MessageRoleAssistant,
|
||||
Content: parts,
|
||||
}
|
||||
}
|
||||
|
||||
func parseSystemContent(raw pqtype.NullRawMessage) (string, error) {
|
||||
if !raw.Valid || len(raw.RawMessage) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var content string
|
||||
if err := json.Unmarshal(raw.RawMessage, &content); err != nil {
|
||||
return "", xerrors.Errorf("parse system message content: %w", err)
|
||||
}
|
||||
return content, nil
|
||||
}
|
||||
|
||||
// sanitizeToolCallID replaces every character matched by the
// package-level toolCallIDSanitizer pattern with "_" so IDs are safe
// to use across providers. The empty string passes through unchanged.
func sanitizeToolCallID(id string) string {
	if id == "" {
		return ""
	}
	return toolCallIDSanitizer.ReplaceAllString(id, "_")
}
|
||||
|
||||
// marshalContentBlock encodes a content block to JSON. For reasoning
// content it additionally injects a derived "title" field into the
// encoded envelope's "data" object so UIs can render a compact heading.
// All other content is returned exactly as json.Marshal produced it.
func marshalContentBlock(block fantasy.Content) (json.RawMessage, error) {
	encoded, err := json.Marshal(block)
	if err != nil {
		return nil, err
	}

	// Only reasoning content with a derivable title needs rewriting.
	title, ok := reasoningTitleFromContent(block)
	if !ok || title == "" {
		return encoded, nil
	}

	// Decode just enough structure to splice the title into "data".
	// NOTE(review): re-marshaling below keeps only the "type" and
	// "data" fields — confirm reasoning blocks encode no siblings.
	var envelope struct {
		Type string         `json:"type"`
		Data map[string]any `json:"data"`
	}
	if err := json.Unmarshal(encoded, &envelope); err != nil {
		return nil, err
	}

	// Defensive: only touch envelopes that really are reasoning.
	if !strings.EqualFold(envelope.Type, string(fantasy.ContentTypeReasoning)) {
		return encoded, nil
	}
	if envelope.Data == nil {
		envelope.Data = map[string]any{}
	}
	envelope.Data["title"] = title

	encodedWithTitle, err := json.Marshal(envelope)
	if err != nil {
		return nil, err
	}
	return encodedWithTitle, nil
}
|
||||
|
||||
func reasoningTitleFromContent(block fantasy.Content) (string, bool) {
|
||||
switch value := block.(type) {
|
||||
case fantasy.ReasoningContent:
|
||||
return ReasoningTitleFromFirstLine(value.Text), true
|
||||
case *fantasy.ReasoningContent:
|
||||
if value == nil {
|
||||
return "", false
|
||||
}
|
||||
return ReasoningTitleFromFirstLine(value.Text), true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// reasoningSummaryTitle extracts a compact UI title from OpenAI
// reasoning provider metadata. It returns the first summary entry that
// compacts to a non-empty title, or "" when the metadata is empty,
// carries no reasoning section, or yields no usable summary.
func reasoningSummaryTitle(metadata fantasy.ProviderMetadata) string {
	if len(metadata) == 0 {
		return ""
	}

	reasoningMetadata := fantasyopenai.GetReasoningMetadata(
		fantasy.ProviderOptions(metadata),
	)
	if reasoningMetadata == nil {
		return ""
	}

	// First non-empty compacted summary wins.
	for _, summary := range reasoningMetadata.Summary {
		if title := compactReasoningSummaryTitle(summary); title != "" {
			return title
		}
	}

	return ""
}
|
||||
|
||||
func compactReasoningSummaryTitle(summary string) string {
|
||||
const maxWords = 8
|
||||
const maxRunes = 80
|
||||
|
||||
summary = strings.TrimSpace(summary)
|
||||
if summary == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
summary = strings.Trim(summary, "\"'`")
|
||||
summary = reasoningSummaryHeadline(summary)
|
||||
words := strings.Fields(summary)
|
||||
if len(words) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
truncated := false
|
||||
if len(words) > maxWords {
|
||||
words = words[:maxWords]
|
||||
truncated = true
|
||||
}
|
||||
|
||||
title := strings.Join(words, " ")
|
||||
if truncated {
|
||||
title += "…"
|
||||
}
|
||||
return truncateRunes(title, maxRunes)
|
||||
}
|
||||
|
||||
// reasoningSummaryHeadline isolates the heading line of a reasoning
// summary. OpenAI summary_text may be markdown like
// "**Title**\n\nLonger explanation ..."; only the heading segment is
// kept, a leading bold span is unwrapped, and surrounding quotes and
// whitespace are stripped.
func reasoningSummaryHeadline(summary string) string {
	headline := strings.TrimSpace(summary)
	if headline == "" {
		return ""
	}

	// Keep only the text before the first paragraph break, then
	// before the first line break of any kind.
	if head, _, found := strings.Cut(headline, "\n\n"); found {
		headline = head
	}
	if idx := strings.IndexAny(headline, "\r\n"); idx >= 0 {
		headline = headline[:idx]
	}

	headline = strings.TrimSpace(headline)
	if headline == "" {
		return ""
	}

	// Unwrap a leading markdown bold span ("**Title**") if present
	// and non-empty.
	if rest, hasBold := strings.CutPrefix(headline, "**"); hasBold {
		if inner, _, closed := strings.Cut(rest, "**"); closed {
			if bold := strings.TrimSpace(inner); bold != "" {
				headline = bold
			}
		}
	}

	return strings.TrimSpace(strings.Trim(headline, "\"'`"))
}
|
||||
|
||||
// truncateRunes returns value shortened to at most maxLen runes.
// A non-positive maxLen yields "". Truncation happens on rune
// boundaries, never mid-codepoint.
func truncateRunes(value string, maxLen int) string {
	if maxLen <= 0 {
		return ""
	}

	// Walk rune start offsets; stop once maxLen runes have passed.
	seen := 0
	for i := range value {
		if seen == maxLen {
			return value[:i]
		}
		seen++
	}
	return value
}
|
||||
@@ -0,0 +1,91 @@
|
||||
package chatprompt_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"charm.land/fantasy"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/chatd/chatprompt"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
)
|
||||
|
||||
// TestConvertMessages_NormalizesAssistantToolCallInput verifies that
// ConvertMessages normalizes persisted assistant tool-call inputs:
// empty, invalid-JSON, and non-object inputs become "{}" while valid
// object inputs are preserved byte-for-byte.
func TestConvertMessages_NormalizesAssistantToolCallInput(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "empty input",
			input:    "",
			expected: "{}",
		},
		{
			name:     "invalid json",
			input:    "{\"command\":",
			expected: "{}",
		},
		{
			name:     "non-object json",
			input:    "[]",
			expected: "{}",
		},
		{
			name:     "valid object json",
			input:    "{\"command\":\"ls\"}",
			expected: "{\"command\":\"ls\"}",
		},
	}

	for _, tc := range testCases {
		tc := tc // Capture for parallel subtests (pre-Go 1.22 loop semantics).
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			// Persist an assistant tool call carrying the raw
			// (possibly malformed) input under test.
			assistantContent, err := chatprompt.MarshalContent([]fantasy.Content{
				fantasy.ToolCallContent{
					ToolCallID: "toolu_01C4PqN6F2493pi7Ebag8Vg7",
					ToolName:   "execute",
					Input:      tc.input,
				},
			})
			require.NoError(t, err)

			// Matching tool result so the pair round-trips intact.
			toolContent, err := chatprompt.MarshalToolResult(
				"toolu_01C4PqN6F2493pi7Ebag8Vg7",
				"execute",
				json.RawMessage(`{"error":"tool call was interrupted before it produced a result"}`),
				true,
			)
			require.NoError(t, err)

			prompt, err := chatprompt.ConvertMessages([]database.ChatMessage{
				{
					Role:       string(fantasy.MessageRoleAssistant),
					Visibility: database.ChatMessageVisibilityBoth,
					Content:    assistantContent,
				},
				{
					Role:       string(fantasy.MessageRoleTool),
					Visibility: database.ChatMessageVisibilityBoth,
					Content:    toolContent,
				},
			})
			require.NoError(t, err)
			require.Len(t, prompt, 2)

			// The assistant message keeps its call, with the
			// normalized input.
			require.Equal(t, fantasy.MessageRoleAssistant, prompt[0].Role)
			toolCalls := chatprompt.ExtractToolCalls(prompt[0].Content)
			require.Len(t, toolCalls, 1)
			require.Equal(t, tc.expected, toolCalls[0].Input)
			require.Equal(t, "execute", toolCalls[0].ToolName)
			require.Equal(t, "toolu_01C4PqN6F2493pi7Ebag8Vg7", toolCalls[0].ToolCallID)

			require.Equal(t, fantasy.MessageRoleTool, prompt[1].Role)
		})
	}
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,191 @@
|
||||
package chatprovider_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fantasyanthropic "charm.land/fantasy/providers/anthropic"
|
||||
fantasyopenai "charm.land/fantasy/providers/openai"
|
||||
fantasyopenrouter "charm.land/fantasy/providers/openrouter"
|
||||
fantasyvercel "charm.land/fantasy/providers/vercel"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/chatd/chatprovider"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// TestReasoningEffortFromChat verifies mapping of user-supplied
// reasoning effort strings to provider-specific constants: input is
// case-insensitive and whitespace-tolerant, and unknown efforts,
// unsupported providers, or nil input all map to nil.
func TestReasoningEffortFromChat(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name     string
		provider string
		input    *string
		want     *string
	}{
		{
			name:     "OpenAICaseInsensitive",
			provider: "openai",
			input:    stringPtr(" HIGH "),
			want:     stringPtr(string(fantasyopenai.ReasoningEffortHigh)),
		},
		{
			name:     "AnthropicEffort",
			provider: "anthropic",
			input:    stringPtr("max"),
			want:     stringPtr(string(fantasyanthropic.EffortMax)),
		},
		{
			name:     "OpenRouterEffort",
			provider: "openrouter",
			input:    stringPtr("medium"),
			want:     stringPtr(string(fantasyopenrouter.ReasoningEffortMedium)),
		},
		{
			name:     "VercelEffort",
			provider: "vercel",
			input:    stringPtr("xhigh"),
			want:     stringPtr(string(fantasyvercel.ReasoningEffortXHigh)),
		},
		{
			name:     "InvalidEffortReturnsNil",
			provider: "openai",
			input:    stringPtr("unknown"),
			want:     nil,
		},
		{
			name:     "UnsupportedProviderReturnsNil",
			provider: "bedrock",
			input:    stringPtr("high"),
			want:     nil,
		},
		{
			name:     "NilInputReturnsNil",
			provider: "openai",
			input:    nil,
			want:     nil,
		},
	}

	for _, tt := range tests {
		tt := tt // Capture for parallel subtests (pre-Go 1.22 loop semantics).
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			got := chatprovider.ReasoningEffortFromChat(tt.provider, tt.input)
			require.Equal(t, tt.want, got)
		})
	}
}
|
||||
|
||||
// TestMergeMissingProviderOptions_OpenRouterNested verifies the merge
// is field-wise and recursive for nested OpenRouter options: fields
// already set on the destination (Reasoning.Enabled, Provider.Order)
// are kept, while unset sibling fields are filled from defaults.
func TestMergeMissingProviderOptions_OpenRouterNested(t *testing.T) {
	t.Parallel()

	// Destination sets only two fields; everything else is unset.
	options := &codersdk.ChatModelProviderOptions{
		OpenRouter: &codersdk.ChatModelOpenRouterProviderOptions{
			Reasoning: &codersdk.ChatModelOpenRouterReasoningOptions{
				Enabled: boolPtr(true),
			},
			Provider: &codersdk.ChatModelOpenRouterProvider{
				Order: []string{"openai"},
			},
		},
	}
	// Defaults populate every field, including conflicting values for
	// the two that the destination already set.
	defaults := &codersdk.ChatModelProviderOptions{
		OpenRouter: &codersdk.ChatModelOpenRouterProviderOptions{
			Reasoning: &codersdk.ChatModelOpenRouterReasoningOptions{
				Enabled:   boolPtr(false),
				Exclude:   boolPtr(true),
				MaxTokens: int64Ptr(123),
				Effort:    stringPtr("high"),
			},
			IncludeUsage: boolPtr(true),
			Provider: &codersdk.ChatModelOpenRouterProvider{
				Order:             []string{"anthropic"},
				AllowFallbacks:    boolPtr(true),
				RequireParameters: boolPtr(false),
				DataCollection:    stringPtr("allow"),
				Only:              []string{"openai"},
				Ignore:            []string{"foo"},
				Quantizations:     []string{"int8"},
				Sort:              stringPtr("latency"),
			},
		},
	}

	chatprovider.MergeMissingProviderOptions(&options, defaults)

	require.NotNil(t, options)
	require.NotNil(t, options.OpenRouter)
	require.NotNil(t, options.OpenRouter.Reasoning)
	// Pre-set destination values win over defaults.
	require.True(t, *options.OpenRouter.Reasoning.Enabled)
	// Unset fields are filled from defaults.
	require.Equal(t, true, *options.OpenRouter.Reasoning.Exclude)
	require.EqualValues(t, 123, *options.OpenRouter.Reasoning.MaxTokens)
	require.Equal(t, "high", *options.OpenRouter.Reasoning.Effort)
	require.NotNil(t, options.OpenRouter.IncludeUsage)
	require.True(t, *options.OpenRouter.IncludeUsage)

	require.NotNil(t, options.OpenRouter.Provider)
	// Pre-set Order is preserved; remaining provider fields merge in.
	require.Equal(t, []string{"openai"}, options.OpenRouter.Provider.Order)
	require.NotNil(t, options.OpenRouter.Provider.AllowFallbacks)
	require.True(t, *options.OpenRouter.Provider.AllowFallbacks)
	require.NotNil(t, options.OpenRouter.Provider.RequireParameters)
	require.False(t, *options.OpenRouter.Provider.RequireParameters)
	require.Equal(t, "allow", *options.OpenRouter.Provider.DataCollection)
	require.Equal(t, []string{"openai"}, options.OpenRouter.Provider.Only)
	require.Equal(t, []string{"foo"}, options.OpenRouter.Provider.Ignore)
	require.Equal(t, []string{"int8"}, options.OpenRouter.Provider.Quantizations)
	require.Equal(t, "latency", *options.OpenRouter.Provider.Sort)
}
|
||||
|
||||
// TestMergeMissingCallConfig_FillsUnsetFields verifies that
// MergeMissingCallConfig fills only unset destination fields from
// defaults — set values (Temperature, OpenAI.User) are never
// overwritten, while nil fields (MaxOutputTokens, TopP,
// OpenAI.ReasoningEffort) are populated.
func TestMergeMissingCallConfig_FillsUnsetFields(t *testing.T) {
	t.Parallel()

	dst := codersdk.ChatModelCallConfig{
		Temperature: float64Ptr(0.2),
		ProviderOptions: &codersdk.ChatModelProviderOptions{
			OpenAI: &codersdk.ChatModelOpenAIProviderOptions{
				User: stringPtr("alice"),
			},
		},
	}
	defaults := codersdk.ChatModelCallConfig{
		MaxOutputTokens: int64Ptr(512),
		Temperature:     float64Ptr(0.9),
		TopP:            float64Ptr(0.8),
		ProviderOptions: &codersdk.ChatModelProviderOptions{
			OpenAI: &codersdk.ChatModelOpenAIProviderOptions{
				User:            stringPtr("bob"),
				ReasoningEffort: stringPtr("medium"),
			},
		},
	}

	chatprovider.MergeMissingCallConfig(&dst, defaults)

	// Unset fields are filled from defaults.
	require.NotNil(t, dst.MaxOutputTokens)
	require.EqualValues(t, 512, *dst.MaxOutputTokens)
	// Pre-set Temperature survives the merge.
	require.NotNil(t, dst.Temperature)
	require.Equal(t, 0.2, *dst.Temperature)
	require.NotNil(t, dst.TopP)
	require.Equal(t, 0.8, *dst.TopP)
	// Nested provider options merge field-wise as well.
	require.NotNil(t, dst.ProviderOptions)
	require.NotNil(t, dst.ProviderOptions.OpenAI)
	require.Equal(t, "alice", *dst.ProviderOptions.OpenAI.User)
	require.Equal(t, "medium", *dst.ProviderOptions.OpenAI.ReasoningEffort)
}
|
||||
|
||||
// stringPtr returns a pointer to a copy of value.
func stringPtr(value string) *string { return &value }

// boolPtr returns a pointer to a copy of value.
func boolPtr(value bool) *bool { return &value }

// int64Ptr returns a pointer to a copy of value.
func int64Ptr(value int64) *int64 { return &value }

// float64Ptr returns a pointer to a copy of value.
func float64Ptr(value float64) *float64 { return &value }
|
||||
@@ -0,0 +1,175 @@
|
||||
// Package chatretry provides retry logic for transient LLM provider
|
||||
// errors. It classifies errors as retryable or permanent and
|
||||
// implements exponential backoff matching the behavior of coder/mux.
|
||||
package chatretry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// InitialDelay is the backoff duration for the first retry
	// attempt.
	InitialDelay = 1 * time.Second

	// MaxDelay is the upper bound for the exponential backoff
	// duration. Matches the cap used in coder/mux.
	MaxDelay = 60 * time.Second
)

// nonRetryablePatterns are substrings that indicate a permanent error
// which should not be retried. These are checked first so that
// ambiguous messages (e.g. "bad request: rate limit") are correctly
// classified as non-retryable.
//
// Matching is done against strings.ToLower(err.Error()) in
// IsRetryable, so every pattern here must be lowercase.
var nonRetryablePatterns = []string{
	"context canceled",
	"context deadline exceeded",
	"authentication",
	"unauthorized",
	"forbidden",
	"invalid api key",
	"invalid_api_key",
	"invalid model",
	"model not found",
	"model_not_found",
	"context length exceeded",
	"context_exceeded",
	"maximum context length",
	"quota",
	"billing",
}

// retryablePatterns are substrings that indicate a transient error
// worth retrying.
//
// Note: "status 429" is absent — 429s are recognized via their
// textual forms ("rate limit", "too many requests") instead; the
// tests (HTTP429InMessage) codify this.
var retryablePatterns = []string{
	"overloaded",
	"rate limit",
	"rate_limit",
	"too many requests",
	"server error",
	"status 500",
	"status 502",
	"status 503",
	"status 529",
	"connection reset",
	"connection refused",
	"eof",
	"broken pipe",
	"timeout",
	"unavailable",
	"service unavailable",
}
|
||||
|
||||
// IsRetryable determines whether an error from an LLM provider is
|
||||
// transient and worth retrying. It inspects the error message and
|
||||
// any wrapped HTTP status codes for known retryable patterns.
|
||||
func IsRetryable(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// context.Canceled is always non-retryable regardless of
|
||||
// wrapping.
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return false
|
||||
}
|
||||
|
||||
lower := strings.ToLower(err.Error())
|
||||
|
||||
// Check non-retryable patterns first so they take precedence.
|
||||
for _, p := range nonRetryablePatterns {
|
||||
if strings.Contains(lower, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range retryablePatterns {
|
||||
if strings.Contains(lower, p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// StatusCodeRetryable returns true for HTTP status codes that
// indicate a transient failure worth retrying: 429 (rate limited),
// 500/502/503 (server-side failures), and 529 (overloaded).
func StatusCodeRetryable(code int) bool {
	return code == 429 ||
		code == 500 ||
		code == 502 ||
		code == 503 ||
		code == 529
}
|
||||
|
||||
// Delay returns the backoff duration for the given 0-indexed attempt.
|
||||
// Uses exponential backoff: min(InitialDelay * 2^attempt, MaxDelay).
|
||||
// Matches the backoff curve used in coder/mux.
|
||||
func Delay(attempt int) time.Duration {
|
||||
d := InitialDelay
|
||||
for range attempt {
|
||||
d *= 2
|
||||
if d >= MaxDelay {
|
||||
return MaxDelay
|
||||
}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// RetryFn is the function to retry. It receives a context and returns
// an error. The context may be a child of the original with adjusted
// deadlines for individual attempts. Implementations should honor
// cancellation of the supplied context.
type RetryFn func(ctx context.Context) error

// OnRetryFn is called before each retry attempt with the attempt
// number (1-indexed), the error that triggered the retry, and the
// delay before the next attempt. It runs synchronously before the
// backoff sleep begins.
type OnRetryFn func(attempt int, err error, delay time.Duration)
|
||||
|
||||
// Retry calls fn repeatedly until it succeeds, returns a
|
||||
// non-retryable error, or ctx is canceled. There is no max attempt
|
||||
// limit — retries continue indefinitely with exponential backoff
|
||||
// (capped at 60s), matching the behavior of coder/mux.
|
||||
//
|
||||
// The onRetry callback (if non-nil) is called before each retry
|
||||
// attempt, giving the caller a chance to reset state, log, or
|
||||
// publish status events.
|
||||
func Retry(ctx context.Context, fn RetryFn, onRetry OnRetryFn) error {
|
||||
var attempt int
|
||||
for {
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !IsRetryable(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the caller's context is already done, return the
|
||||
// context error so cancellation propagates cleanly.
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
delay := Delay(attempt)
|
||||
|
||||
if onRetry != nil {
|
||||
onRetry(attempt+1, err, delay)
|
||||
}
|
||||
|
||||
timer := time.NewTimer(delay)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
attempt++
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,452 @@
|
||||
package chatretry_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/chatd/chatretry"
|
||||
)
|
||||
|
||||
// TestIsRetryable exhaustively covers the error-message classifier:
// transient patterns, permanent patterns (which take precedence),
// context cancellation, and wrapped errors. Subtests are parallel;
// sharing the loop variable is safe on Go 1.22+ per-iteration
// loop-variable semantics.
func TestIsRetryable(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name      string
		err       error
		retryable bool
	}{
		// Retryable errors.
		{
			name:      "Overloaded",
			err:       xerrors.New("model is overloaded, please try again"),
			retryable: true,
		},
		{
			name:      "RateLimit",
			err:       xerrors.New("rate limit exceeded"),
			retryable: true,
		},
		{
			name:      "RateLimitUnderscore",
			err:       xerrors.New("rate_limit: too many requests"),
			retryable: true,
		},
		{
			name:      "TooManyRequests",
			err:       xerrors.New("too many requests"),
			retryable: true,
		},
		{
			name:      "HTTP429InMessage",
			err:       xerrors.New("received status 429 from upstream"),
			retryable: false, // "429" alone is not a pattern; needs matching text.
		},
		{
			name:      "HTTP529InMessage",
			err:       xerrors.New("received status 529 from upstream"),
			retryable: true,
		},
		{
			name:      "ServerError500",
			err:       xerrors.New("status 500: internal server error"),
			retryable: true,
		},
		{
			name:      "ServerErrorGeneric",
			err:       xerrors.New("server error"),
			retryable: true,
		},
		{
			name:      "ConnectionReset",
			err:       xerrors.New("read tcp: connection reset by peer"),
			retryable: true,
		},
		{
			name:      "ConnectionRefused",
			err:       xerrors.New("dial tcp: connection refused"),
			retryable: true,
		},
		{
			name:      "EOF",
			err:       xerrors.New("unexpected EOF"),
			retryable: true,
		},
		{
			name:      "BrokenPipe",
			err:       xerrors.New("write: broken pipe"),
			retryable: true,
		},
		{
			name:      "NetworkTimeout",
			err:       xerrors.New("i/o timeout"),
			retryable: true,
		},
		{
			name:      "ServiceUnavailable",
			err:       xerrors.New("service unavailable"),
			retryable: true,
		},
		{
			name:      "Unavailable",
			err:       xerrors.New("the service is currently unavailable"),
			retryable: true,
		},
		{
			name:      "Status502",
			err:       xerrors.New("status 502: bad gateway"),
			retryable: true,
		},
		{
			name:      "Status503",
			err:       xerrors.New("status 503"),
			retryable: true,
		},

		// Non-retryable errors.
		{
			name:      "Nil",
			err:       nil,
			retryable: false,
		},
		{
			name:      "ContextCanceled",
			err:       context.Canceled,
			retryable: false,
		},
		{
			name:      "ContextCanceledWrapped",
			err:       xerrors.Errorf("operation failed: %w", context.Canceled),
			retryable: false,
		},
		{
			name:      "ContextCanceledMessage",
			err:       xerrors.New("context canceled"),
			retryable: false,
		},
		{
			name:      "ContextDeadlineExceeded",
			err:       xerrors.New("context deadline exceeded"),
			retryable: false,
		},
		{
			name:      "Authentication",
			err:       xerrors.New("authentication failed"),
			retryable: false,
		},
		{
			name:      "Unauthorized",
			err:       xerrors.New("401 Unauthorized"),
			retryable: false,
		},
		{
			name:      "Forbidden",
			err:       xerrors.New("403 Forbidden"),
			retryable: false,
		},
		{
			name:      "InvalidAPIKey",
			err:       xerrors.New("invalid api key"),
			retryable: false,
		},
		{
			name:      "InvalidAPIKeyUnderscore",
			err:       xerrors.New("invalid_api_key"),
			retryable: false,
		},
		{
			name:      "InvalidModel",
			err:       xerrors.New("invalid model: gpt-5-turbo"),
			retryable: false,
		},
		{
			name:      "ModelNotFound",
			err:       xerrors.New("model not found"),
			retryable: false,
		},
		{
			name:      "ModelNotFoundUnderscore",
			err:       xerrors.New("model_not_found"),
			retryable: false,
		},
		{
			name:      "ContextLengthExceeded",
			err:       xerrors.New("context length exceeded"),
			retryable: false,
		},
		{
			name:      "ContextExceededUnderscore",
			err:       xerrors.New("context_exceeded"),
			retryable: false,
		},
		{
			name:      "MaximumContextLength",
			err:       xerrors.New("maximum context length"),
			retryable: false,
		},
		{
			name:      "QuotaExceeded",
			err:       xerrors.New("quota exceeded"),
			retryable: false,
		},
		{
			name:      "BillingError",
			err:       xerrors.New("billing issue: payment required"),
			retryable: false,
		},

		// Wrapped errors preserve retryability.
		{
			name:      "WrappedRetryable",
			err:       xerrors.Errorf("provider call failed: %w", xerrors.New("service unavailable")),
			retryable: true,
		},
		{
			name:      "WrappedNonRetryable",
			err:       xerrors.Errorf("provider call failed: %w", xerrors.New("invalid api key")),
			retryable: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got := chatretry.IsRetryable(tt.err)
			if got != tt.retryable {
				t.Errorf("IsRetryable(%v) = %v, want %v", tt.err, got, tt.retryable)
			}
		})
	}
}
|
||||
|
||||
// TestStatusCodeRetryable pins the exact set of HTTP status codes
// classified as transient (429, 500, 502, 503, 529) against a sample
// of non-retryable codes.
func TestStatusCodeRetryable(t *testing.T) {
	t.Parallel()

	tests := []struct {
		code      int
		retryable bool
	}{
		{429, true},
		{500, true},
		{502, true},
		{503, true},
		{529, true},
		{200, false},
		{400, false},
		{401, false},
		{403, false},
		{404, false},
	}

	for _, tt := range tests {
		t.Run(fmt.Sprintf("Status%d", tt.code), func(t *testing.T) {
			t.Parallel()
			got := chatretry.StatusCodeRetryable(tt.code)
			if got != tt.retryable {
				t.Errorf("StatusCodeRetryable(%d) = %v, want %v", tt.code, got, tt.retryable)
			}
		})
	}
}
|
||||
|
||||
// TestDelay pins the exponential backoff curve: 1s doubling per
// attempt, capped at MaxDelay (60s) from attempt 6 onwards.
func TestDelay(t *testing.T) {
	t.Parallel()

	tests := []struct {
		attempt int
		want    time.Duration
	}{
		{0, 1 * time.Second},
		{1, 2 * time.Second},
		{2, 4 * time.Second},
		{3, 8 * time.Second},
		{4, 16 * time.Second},
		{5, 32 * time.Second},
		{6, 60 * time.Second},  // Capped at MaxDelay.
		{10, 60 * time.Second}, // Still capped.
		{100, 60 * time.Second},
	}

	for _, tt := range tests {
		t.Run(fmt.Sprintf("Attempt%d", tt.attempt), func(t *testing.T) {
			t.Parallel()
			got := chatretry.Delay(tt.attempt)
			if got != tt.want {
				t.Errorf("Delay(%d) = %v, want %v", tt.attempt, got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestRetry_SuccessOnFirstTry verifies a successful fn is called
// exactly once with no backoff.
func TestRetry_SuccessOnFirstTry(t *testing.T) {
	t.Parallel()

	calls := 0
	err := chatretry.Retry(context.Background(), func(_ context.Context) error {
		calls++
		return nil
	}, nil)
	if err != nil {
		t.Fatalf("expected nil error, got %v", err)
	}
	if calls != 1 {
		t.Fatalf("expected fn called once, got %d", calls)
	}
}
|
||||
|
||||
// TestRetry_TransientThenSuccess verifies one transient failure is
// retried and the second attempt's success is returned.
//
// NOTE(review): this test sleeps through one real 1s backoff
// (chatretry.InitialDelay) — intentional wall-clock cost.
func TestRetry_TransientThenSuccess(t *testing.T) {
	t.Parallel()

	calls := 0
	err := chatretry.Retry(context.Background(), func(_ context.Context) error {
		calls++
		if calls == 1 {
			return xerrors.New("service unavailable")
		}
		return nil
	}, nil)
	if err != nil {
		t.Fatalf("expected nil error, got %v", err)
	}
	if calls != 2 {
		t.Fatalf("expected fn called twice, got %d", calls)
	}
}
|
||||
|
||||
// TestRetry_MultipleTransientThenSuccess verifies three consecutive
// transient failures are retried until the fourth attempt succeeds.
//
// NOTE(review): this sleeps through real backoffs of 1s+2s+4s —
// intentional wall-clock cost.
func TestRetry_MultipleTransientThenSuccess(t *testing.T) {
	t.Parallel()

	calls := 0
	err := chatretry.Retry(context.Background(), func(_ context.Context) error {
		calls++
		if calls <= 3 {
			return xerrors.New("overloaded")
		}
		return nil
	}, nil)
	if err != nil {
		t.Fatalf("expected nil error, got %v", err)
	}
	if calls != 4 {
		t.Fatalf("expected fn called 4 times, got %d", calls)
	}
}
|
||||
|
||||
// TestRetry_NonRetryableError verifies a permanent error is returned
// immediately, unchanged, after a single attempt.
func TestRetry_NonRetryableError(t *testing.T) {
	t.Parallel()

	calls := 0
	err := chatretry.Retry(context.Background(), func(_ context.Context) error {
		calls++
		return xerrors.New("invalid api key")
	}, nil)

	if err == nil {
		t.Fatal("expected error, got nil")
	}
	if err.Error() != "invalid api key" {
		t.Fatalf("expected 'invalid api key', got %q", err.Error())
	}
	if calls != 1 {
		t.Fatalf("expected fn called once, got %d", calls)
	}
}
|
||||
|
||||
func TestRetry_ContextCanceledDuringWait(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
calls := 0
|
||||
err := chatretry.Retry(ctx, func(_ context.Context) error {
|
||||
calls++
|
||||
// Cancel after the first retryable error so the wait
|
||||
// select picks up the cancellation.
|
||||
if calls == 1 {
|
||||
cancel()
|
||||
}
|
||||
return xerrors.New("overloaded")
|
||||
}, nil)
|
||||
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
t.Fatalf("expected context.Canceled, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetry_ContextCanceledDuringFn(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
err := chatretry.Retry(ctx, func(_ context.Context) error {
|
||||
cancel()
|
||||
// Return a retryable error; the loop should detect that
|
||||
// ctx is done and return the context error.
|
||||
return xerrors.New("overloaded")
|
||||
}, nil)
|
||||
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
t.Fatalf("expected context.Canceled, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetry_OnRetryCalledWithCorrectArgs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type retryRecord struct {
|
||||
attempt int
|
||||
errMsg string
|
||||
delay time.Duration
|
||||
}
|
||||
var records []retryRecord
|
||||
|
||||
calls := 0
|
||||
err := chatretry.Retry(context.Background(), func(_ context.Context) error {
|
||||
calls++
|
||||
if calls <= 2 {
|
||||
return xerrors.New("rate limit exceeded")
|
||||
}
|
||||
return nil
|
||||
}, func(attempt int, err error, delay time.Duration) {
|
||||
records = append(records, retryRecord{
|
||||
attempt: attempt,
|
||||
errMsg: err.Error(),
|
||||
delay: delay,
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("expected nil error, got %v", err)
|
||||
}
|
||||
if len(records) != 2 {
|
||||
t.Fatalf("expected 2 onRetry calls, got %d", len(records))
|
||||
}
|
||||
if records[0].attempt != 1 {
|
||||
t.Errorf("first onRetry attempt = %d, want 1", records[0].attempt)
|
||||
}
|
||||
if records[1].attempt != 2 {
|
||||
t.Errorf("second onRetry attempt = %d, want 2", records[1].attempt)
|
||||
}
|
||||
if records[0].errMsg != "rate limit exceeded" {
|
||||
t.Errorf("first onRetry error = %q, want 'rate limit exceeded'", records[0].errMsg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetry_OnRetryNilDoesNotPanic(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var calls atomic.Int32
|
||||
err := chatretry.Retry(context.Background(), func(_ context.Context) error {
|
||||
if calls.Add(1) == 1 {
|
||||
return xerrors.New("overloaded")
|
||||
}
|
||||
return nil
|
||||
}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("expected nil error, got %v", err)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,409 @@
|
||||
package chattest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// AnthropicHandler handles Anthropic API requests and returns a response.
// It is invoked once for each POST /v1/messages request the test server receives.
type AnthropicHandler func(req *AnthropicRequest) AnthropicResponse
|
||||
|
||||
// AnthropicResponse represents a response to an Anthropic request.
// Either StreamingChunks or Response should be set, not both.
type AnthropicResponse struct {
	StreamingChunks <-chan AnthropicChunk // SSE chunks; the server consumes until the channel is closed.
	Response        *AnthropicMessage     // Single JSON message body for non-streaming requests.
	Error           *ErrorResponse        // If set, server returns this HTTP error instead of streaming/JSON.
}
|
||||
|
||||
// AnthropicRequest represents an Anthropic messages request as decoded from
// the wire, with the originating *http.Request embedded for header/URL access.
type AnthropicRequest struct {
	*http.Request // Embed http.Request
	Model     string                    `json:"model"`
	Messages  []AnthropicRequestMessage `json:"messages"`
	Stream    bool                      `json:"stream,omitempty"`
	MaxTokens int                       `json:"max_tokens,omitempty"`
	// TODO: encoding/json ignores inline tags. Add custom UnmarshalJSON to capture unknown keys.
	Options map[string]interface{} `json:",inline"` //nolint:revive
}
|
||||
|
||||
// AnthropicRequestMessage represents a message in an Anthropic request.
// Content may be either a string or a structured content array, so it is
// kept as raw JSON for the handler to interpret.
type AnthropicRequestMessage struct {
	Role    string          `json:"role"`
	Content json.RawMessage `json:"content"`
}
|
||||
|
||||
// AnthropicMessage represents a message in an Anthropic response.
// Content holds plain text; writeNonStreamingResponse wraps it into the
// content-block array shape on the wire.
type AnthropicMessage struct {
	ID         string         `json:"id,omitempty"`
	Type       string         `json:"type,omitempty"`
	Role       string         `json:"role"`
	Content    string         `json:"content,omitempty"`
	Model      string         `json:"model,omitempty"`
	StopReason string         `json:"stop_reason,omitempty"`
	Usage      AnthropicUsage `json:"usage,omitempty"`
}
|
||||
|
||||
// AnthropicUsage represents token usage information in an Anthropic response.
type AnthropicUsage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
}
|
||||
|
||||
// AnthropicChunk represents a streaming chunk from Anthropic. Which fields
// are serialized depends on Type; see writeStreamingResponse for the mapping.
type AnthropicChunk struct {
	Type         string                `json:"type"`
	Index        int                   `json:"index,omitempty"`
	Message      AnthropicChunkMessage `json:"message,omitempty"`
	ContentBlock AnthropicContentBlock `json:"content_block,omitempty"`
	Delta        AnthropicDeltaBlock   `json:"delta,omitempty"`
	StopReason   string                `json:"stop_reason,omitempty"`
	StopSequence *string               `json:"stop_sequence,omitempty"`
	Usage        AnthropicUsage        `json:"usage,omitempty"`
}
|
||||
|
||||
// AnthropicChunkMessage represents message metadata in a message_start chunk.
type AnthropicChunkMessage struct {
	ID    string `json:"id"`
	Type  string `json:"type"`
	Role  string `json:"role"`
	Model string `json:"model"`
}
|
||||
|
||||
// AnthropicContentBlock represents a content block in a content_block_start
// chunk. Text is used for "text" blocks; ID/Name/Input for "tool_use" blocks.
type AnthropicContentBlock struct {
	Type  string          `json:"type"`
	Text  string          `json:"text,omitempty"`
	ID    string          `json:"id,omitempty"`
	Name  string          `json:"name,omitempty"`
	Input json.RawMessage `json:"input,omitempty"`
}
|
||||
|
||||
// AnthropicDeltaBlock represents a delta block in a content_block_delta
// chunk. Text carries text_delta payloads; PartialJSON carries
// input_json_delta payloads.
type AnthropicDeltaBlock struct {
	Type        string `json:"type"`
	Text        string `json:"text,omitempty"`
	PartialJSON string `json:"partial_json,omitempty"`
}
|
||||
|
||||
// anthropicServer is a test server that mocks the Anthropic API.
type anthropicServer struct {
	mu      sync.Mutex        // guards request
	server  *httptest.Server  // underlying HTTP test server
	handler AnthropicHandler  // user-supplied per-request handler
	request *AnthropicRequest // most recently decoded request
}
|
||||
|
||||
// NewAnthropic creates a new Anthropic test server with a handler function.
|
||||
// The handler is called for each request and should return either a streaming
|
||||
// response (via channel) or a non-streaming response.
|
||||
// Returns the base URL of the server.
|
||||
func NewAnthropic(t testing.TB, handler AnthropicHandler) string {
|
||||
t.Helper()
|
||||
|
||||
s := &anthropicServer{
|
||||
handler: handler,
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("POST /v1/messages", s.handleMessages)
|
||||
|
||||
s.server = httptest.NewServer(mux)
|
||||
|
||||
t.Cleanup(func() {
|
||||
s.server.Close()
|
||||
})
|
||||
|
||||
return s.server.URL
|
||||
}
|
||||
|
||||
func (s *anthropicServer) handleMessages(w http.ResponseWriter, r *http.Request) {
|
||||
var req AnthropicRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
// Return a more detailed error for debugging
|
||||
http.Error(w, fmt.Sprintf("decode request: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
req.Request = r // Embed the original http.Request
|
||||
|
||||
s.mu.Lock()
|
||||
s.request = &req
|
||||
s.mu.Unlock()
|
||||
|
||||
resp := s.handler(&req)
|
||||
s.writeResponse(w, &req, resp)
|
||||
}
|
||||
|
||||
func (s *anthropicServer) writeResponse(w http.ResponseWriter, req *AnthropicRequest, resp AnthropicResponse) {
|
||||
if resp.Error != nil {
|
||||
writeErrorResponse(w, resp.Error)
|
||||
return
|
||||
}
|
||||
|
||||
hasStreaming := resp.StreamingChunks != nil
|
||||
hasNonStreaming := resp.Response != nil
|
||||
|
||||
switch {
|
||||
case hasStreaming && hasNonStreaming:
|
||||
http.Error(w, "handler returned both streaming and non-streaming responses", http.StatusInternalServerError)
|
||||
return
|
||||
case !hasStreaming && !hasNonStreaming:
|
||||
http.Error(w, "handler returned empty response", http.StatusInternalServerError)
|
||||
return
|
||||
case req.Stream && !hasStreaming:
|
||||
http.Error(w, "handler returned non-streaming response for streaming request", http.StatusInternalServerError)
|
||||
return
|
||||
case !req.Stream && !hasNonStreaming:
|
||||
http.Error(w, "handler returned streaming response for non-streaming request", http.StatusInternalServerError)
|
||||
return
|
||||
case hasStreaming:
|
||||
s.writeStreamingResponse(w, resp.StreamingChunks)
|
||||
default:
|
||||
s.writeNonStreamingResponse(w, resp.Response)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *anthropicServer) writeStreamingResponse(w http.ResponseWriter, chunks <-chan AnthropicChunk) {
|
||||
_ = s // receiver unused but kept for consistency
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
w.Header().Set("anthropic-version", "2023-06-01")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
http.Error(w, "streaming not supported", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
for chunk := range chunks {
|
||||
chunkData := make(map[string]interface{})
|
||||
chunkData["type"] = chunk.Type
|
||||
|
||||
switch chunk.Type {
|
||||
case "message_start":
|
||||
chunkData["message"] = chunk.Message
|
||||
case "content_block_start":
|
||||
chunkData["index"] = chunk.Index
|
||||
chunkData["content_block"] = chunk.ContentBlock
|
||||
case "content_block_delta":
|
||||
chunkData["index"] = chunk.Index
|
||||
chunkData["delta"] = chunk.Delta
|
||||
case "content_block_stop":
|
||||
chunkData["index"] = chunk.Index
|
||||
case "message_delta":
|
||||
chunkData["delta"] = map[string]interface{}{
|
||||
"stop_reason": chunk.StopReason,
|
||||
"stop_sequence": chunk.StopSequence,
|
||||
}
|
||||
chunkData["usage"] = chunk.Usage
|
||||
case "message_stop":
|
||||
// No additional fields
|
||||
}
|
||||
|
||||
chunkBytes, err := json.Marshal(chunkData)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Send both event and data lines to match Anthropic API format
|
||||
if _, err := fmt.Fprintf(w, "event: %s\ndata: %s\n\n", chunk.Type, chunkBytes); err != nil {
|
||||
return
|
||||
}
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *anthropicServer) writeNonStreamingResponse(w http.ResponseWriter, resp *AnthropicMessage) {
|
||||
_ = s // receiver unused but kept for consistency
|
||||
response := map[string]interface{}{
|
||||
"id": resp.ID,
|
||||
"type": resp.Type,
|
||||
"role": resp.Role,
|
||||
"model": resp.Model,
|
||||
"content": []map[string]interface{}{
|
||||
{
|
||||
"type": "text",
|
||||
"text": resp.Content,
|
||||
},
|
||||
},
|
||||
"stop_reason": resp.StopReason,
|
||||
"usage": resp.Usage,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("anthropic-version", "2023-06-01")
|
||||
_ = json.NewEncoder(w).Encode(response)
|
||||
}
|
||||
|
||||
// AnthropicStreamingResponse creates a streaming response from chunks.
|
||||
func AnthropicStreamingResponse(chunks ...AnthropicChunk) AnthropicResponse {
|
||||
ch := make(chan AnthropicChunk, len(chunks))
|
||||
go func() {
|
||||
for _, chunk := range chunks {
|
||||
ch <- chunk
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return AnthropicResponse{StreamingChunks: ch}
|
||||
}
|
||||
|
||||
// AnthropicNonStreamingResponse creates a non-streaming response with the given text.
|
||||
func AnthropicNonStreamingResponse(text string) AnthropicResponse {
|
||||
return AnthropicResponse{
|
||||
Response: &AnthropicMessage{
|
||||
ID: fmt.Sprintf("msg-%s", uuid.New().String()[:8]),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Content: text,
|
||||
Model: "claude-3-opus-20240229",
|
||||
StopReason: "end_turn",
|
||||
Usage: AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AnthropicTextChunks creates a complete streaming response with text deltas.
|
||||
// Takes text deltas and creates all required chunks (message_start,
|
||||
// content_block_start, content_block_delta for each delta,
|
||||
// content_block_stop, message_delta, message_stop).
|
||||
func AnthropicTextChunks(deltas ...string) []AnthropicChunk {
|
||||
if len(deltas) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
messageID := fmt.Sprintf("msg-%s", uuid.New().String()[:8])
|
||||
model := "claude-3-opus-20240229"
|
||||
|
||||
chunks := []AnthropicChunk{
|
||||
{
|
||||
Type: "message_start",
|
||||
Message: AnthropicChunkMessage{
|
||||
ID: messageID,
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: model,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "content_block_start",
|
||||
Index: 0,
|
||||
ContentBlock: AnthropicContentBlock{
|
||||
Type: "text",
|
||||
Text: "", // According to Anthropic API spec, text should be empty in content_block_start
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Add a delta chunk for each delta
|
||||
for _, delta := range deltas {
|
||||
chunks = append(chunks, AnthropicChunk{
|
||||
Type: "content_block_delta",
|
||||
Index: 0,
|
||||
Delta: AnthropicDeltaBlock{
|
||||
Type: "text_delta",
|
||||
Text: delta,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
chunks = append(chunks,
|
||||
AnthropicChunk{
|
||||
Type: "content_block_stop",
|
||||
Index: 0,
|
||||
},
|
||||
AnthropicChunk{
|
||||
Type: "message_delta",
|
||||
StopReason: "end_turn",
|
||||
Usage: AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
},
|
||||
AnthropicChunk{
|
||||
Type: "message_stop",
|
||||
},
|
||||
)
|
||||
|
||||
return chunks
|
||||
}
|
||||
|
||||
// AnthropicToolCallChunks creates a complete streaming response for a tool call.
|
||||
// Input JSON can be split across multiple deltas, matching Anthropic's
|
||||
// input_json_delta streaming behavior.
|
||||
func AnthropicToolCallChunks(toolName string, inputJSONDeltas ...string) []AnthropicChunk {
|
||||
if len(inputJSONDeltas) == 0 {
|
||||
return nil
|
||||
}
|
||||
if toolName == "" {
|
||||
toolName = "tool"
|
||||
}
|
||||
|
||||
messageID := fmt.Sprintf("msg-%s", uuid.New().String()[:8])
|
||||
model := "claude-3-opus-20240229"
|
||||
toolCallID := fmt.Sprintf("toolu_%s", uuid.New().String()[:8])
|
||||
|
||||
chunks := []AnthropicChunk{
|
||||
{
|
||||
Type: "message_start",
|
||||
Message: AnthropicChunkMessage{
|
||||
ID: messageID,
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: model,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "content_block_start",
|
||||
Index: 0,
|
||||
ContentBlock: AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: toolCallID,
|
||||
Name: toolName,
|
||||
Input: json.RawMessage("{}"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, delta := range inputJSONDeltas {
|
||||
chunks = append(chunks, AnthropicChunk{
|
||||
Type: "content_block_delta",
|
||||
Index: 0,
|
||||
Delta: AnthropicDeltaBlock{
|
||||
Type: "input_json_delta",
|
||||
PartialJSON: delta,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
chunks = append(chunks,
|
||||
AnthropicChunk{
|
||||
Type: "content_block_stop",
|
||||
Index: 0,
|
||||
},
|
||||
AnthropicChunk{
|
||||
Type: "message_delta",
|
||||
StopReason: "tool_use",
|
||||
Usage: AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
},
|
||||
AnthropicChunk{
|
||||
Type: "message_stop",
|
||||
},
|
||||
)
|
||||
|
||||
return chunks
|
||||
}
|
||||
@@ -0,0 +1,221 @@
|
||||
package chattest_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"charm.land/fantasy"
|
||||
fantasyanthropic "charm.land/fantasy/providers/anthropic"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/chatd/chattest"
|
||||
)
|
||||
|
||||
func TestAnthropic_Streaming(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
serverURL := chattest.NewAnthropic(t, func(req *chattest.AnthropicRequest) chattest.AnthropicResponse {
|
||||
return chattest.AnthropicStreamingResponse(
|
||||
chattest.AnthropicTextChunks("Hello", " world", "!")...,
|
||||
)
|
||||
})
|
||||
|
||||
// Create fantasy client pointing to our test server
|
||||
client, err := fantasyanthropic.New(
|
||||
fantasyanthropic.WithAPIKey("test-key"),
|
||||
fantasyanthropic.WithBaseURL(serverURL),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
model, err := client.LanguageModel(ctx, "claude-3-opus-20240229")
|
||||
require.NoError(t, err)
|
||||
|
||||
call := fantasy.Call{
|
||||
Prompt: []fantasy.Message{
|
||||
{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: "Say hello"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
stream, err := model.Stream(ctx, call)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedDeltas := []string{"Hello", " world", "!"}
|
||||
deltaIndex := 0
|
||||
|
||||
var allParts []fantasy.StreamPart
|
||||
for part := range stream {
|
||||
allParts = append(allParts, part)
|
||||
if part.Type == fantasy.StreamPartTypeTextDelta {
|
||||
require.Less(t, deltaIndex, len(expectedDeltas), "Received more deltas than expected")
|
||||
require.Equal(t, expectedDeltas[deltaIndex], part.Delta,
|
||||
"Delta at index %d should be %q, got %q", deltaIndex, expectedDeltas[deltaIndex], part.Delta)
|
||||
deltaIndex++
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, len(expectedDeltas), deltaIndex, "Expected %d deltas, got %d. Total parts received: %d", len(expectedDeltas), deltaIndex, len(allParts))
|
||||
}
|
||||
|
||||
func TestAnthropic_ToolCalls(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var requestCount atomic.Int32
|
||||
serverURL := chattest.NewAnthropic(t, func(req *chattest.AnthropicRequest) chattest.AnthropicResponse {
|
||||
switch requestCount.Add(1) {
|
||||
case 1:
|
||||
return chattest.AnthropicStreamingResponse(
|
||||
chattest.AnthropicToolCallChunks("get_weather", `{"location":"San Francisco"}`)...,
|
||||
)
|
||||
default:
|
||||
return chattest.AnthropicStreamingResponse(
|
||||
chattest.AnthropicTextChunks("The weather in San Francisco is 72F.")...,
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
client, err := fantasyanthropic.New(
|
||||
fantasyanthropic.WithAPIKey("test-key"),
|
||||
fantasyanthropic.WithBaseURL(serverURL),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
model, err := client.LanguageModel(context.Background(), "claude-3-opus-20240229")
|
||||
require.NoError(t, err)
|
||||
|
||||
type weatherInput struct {
|
||||
Location string `json:"location"`
|
||||
}
|
||||
var toolCallCount atomic.Int32
|
||||
weatherTool := fantasy.NewAgentTool(
|
||||
"get_weather",
|
||||
"Get weather for a location.",
|
||||
func(ctx context.Context, input weatherInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
|
||||
toolCallCount.Add(1)
|
||||
require.Equal(t, "San Francisco", input.Location)
|
||||
return fantasy.NewTextResponse("72F"), nil
|
||||
},
|
||||
)
|
||||
|
||||
agent := fantasy.NewAgent(
|
||||
model,
|
||||
fantasy.WithSystemPrompt("You are a helpful assistant."),
|
||||
fantasy.WithTools(weatherTool),
|
||||
)
|
||||
|
||||
result, err := agent.Stream(context.Background(), fantasy.AgentStreamCall{
|
||||
Prompt: "What's the weather in San Francisco?",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
|
||||
require.Equal(t, int32(1), toolCallCount.Load(), "expected exactly one tool execution")
|
||||
require.GreaterOrEqual(t, requestCount.Load(), int32(2), "expected follow-up model call after tool execution")
|
||||
}
|
||||
|
||||
func TestAnthropic_NonStreaming(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
serverURL := chattest.NewAnthropic(t, func(req *chattest.AnthropicRequest) chattest.AnthropicResponse {
|
||||
return chattest.AnthropicNonStreamingResponse("Response text")
|
||||
})
|
||||
|
||||
// Create fantasy client pointing to our test server
|
||||
client, err := fantasyanthropic.New(
|
||||
fantasyanthropic.WithAPIKey("test-key"),
|
||||
fantasyanthropic.WithBaseURL(serverURL),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
model, err := client.LanguageModel(ctx, "claude-3-opus-20240229")
|
||||
require.NoError(t, err)
|
||||
|
||||
call := fantasy.Call{
|
||||
Prompt: []fantasy.Message{
|
||||
{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{
|
||||
fantasy.TextPart{Text: "Test message"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
response, err := model.Generate(ctx, call)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, response)
|
||||
}
|
||||
|
||||
func TestAnthropic_Streaming_MismatchReturnsErrorPart(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
serverURL := chattest.NewAnthropic(t, func(req *chattest.AnthropicRequest) chattest.AnthropicResponse {
|
||||
return chattest.AnthropicNonStreamingResponse("wrong response type")
|
||||
})
|
||||
|
||||
client, err := fantasyanthropic.New(
|
||||
fantasyanthropic.WithAPIKey("test-key"),
|
||||
fantasyanthropic.WithBaseURL(serverURL),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
model, err := client.LanguageModel(context.Background(), "claude-3-opus-20240229")
|
||||
require.NoError(t, err)
|
||||
|
||||
stream, err := model.Stream(context.Background(), fantasy.Call{
|
||||
Prompt: []fantasy.Message{
|
||||
{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{fantasy.TextPart{Text: "hello"}},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var streamErr error
|
||||
for part := range stream {
|
||||
if part.Type == fantasy.StreamPartTypeError {
|
||||
streamErr = part.Error
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Error(t, streamErr)
|
||||
require.Contains(t, streamErr.Error(), "500 Internal Server Error")
|
||||
}
|
||||
|
||||
func TestAnthropic_NonStreaming_MismatchReturnsError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
serverURL := chattest.NewAnthropic(t, func(req *chattest.AnthropicRequest) chattest.AnthropicResponse {
|
||||
return chattest.AnthropicStreamingResponse(
|
||||
chattest.AnthropicTextChunks("wrong", " response")...,
|
||||
)
|
||||
})
|
||||
|
||||
client, err := fantasyanthropic.New(
|
||||
fantasyanthropic.WithAPIKey("test-key"),
|
||||
fantasyanthropic.WithBaseURL(serverURL),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
model, err := client.LanguageModel(context.Background(), "claude-3-opus-20240229")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = model.Generate(context.Background(), fantasy.Call{
|
||||
Prompt: []fantasy.Message{
|
||||
{
|
||||
Role: fantasy.MessageRoleUser,
|
||||
Content: []fantasy.MessagePart{fantasy.TextPart{Text: "hello"}},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "500 Internal Server Error")
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user