Compare commits
29 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 2970c54140 | |||
| 26e3da1f17 | |||
| b49c4b3257 | |||
| 55da992aeb | |||
| 613029cb21 | |||
| 7e0cf53dd1 | |||
| fa050ee0ab | |||
| bfb6583ecc | |||
| 40b3970388 | |||
| fa284dc149 | |||
| b89dc439b7 | |||
| d4ce9620d6 | |||
| 16408b157b | |||
| ef29702014 | |||
| 43e67d12e2 | |||
| 94cf95a3e8 | |||
| 5e2f845272 | |||
| 3d5dc93060 | |||
| 6e1fe14d6c | |||
| c0b939f7e4 | |||
| 1fd77bc459 | |||
| 37c3476ca7 | |||
| 26a3f82a39 | |||
| ea6b11472c | |||
| a92dc3d5b3 | |||
| a69aea2c83 | |||
| c2db391019 | |||
| 895cc07395 | |||
| 0377c985e4 |
@@ -4,7 +4,7 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.25.6"
|
||||
default: "1.25.7"
|
||||
use-preinstalled-go:
|
||||
description: "Whether to use preinstalled Go."
|
||||
default: "false"
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.14.1
|
||||
terraform_version: 1.14.5
|
||||
terraform_wrapper: false
|
||||
|
||||
@@ -35,7 +35,7 @@ jobs:
|
||||
tailnet-integration: ${{ steps.filter.outputs.tailnet-integration }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -157,7 +157,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -251,7 +251,7 @@ jobs:
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -308,7 +308,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -360,7 +360,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -554,7 +554,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -616,7 +616,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -688,7 +688,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -715,7 +715,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -748,7 +748,7 @@ jobs:
|
||||
name: ${{ matrix.variant.name }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -828,7 +828,7 @@ jobs:
|
||||
if: needs.changes.outputs.site == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -909,7 +909,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -980,7 +980,7 @@ jobs:
|
||||
if: always()
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1100,7 +1100,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1155,7 +1155,7 @@ jobs:
|
||||
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -1552,7 +1552,7 @@ jobs:
|
||||
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
packages: write # to retag image as dogfood
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
needs: deploy
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ jobs:
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -125,7 +125,7 @@ jobs:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -184,7 +184,7 @@ jobs:
|
||||
pull-requests: write # needed for commenting on PRs
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -228,7 +228,7 @@ jobs:
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -288,7 +288,7 @@ jobs:
|
||||
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -164,7 +164,7 @@ jobs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -802,7 +802,7 @@ jobs:
|
||||
# TODO: skip this if it's not a new release (i.e. a backport). This is
|
||||
# fine right now because it just makes a PR that we can close.
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -878,7 +878,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -971,7 +971,7 @@ jobs:
|
||||
if: ${{ !inputs.dry_run }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
echo "image=$(cat "$image_job")" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
uses: aquasecurity/trivy-action@c1824fd6edce30d7ab345a9989de00bbd46ef284 # v0.34.0
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
|
||||
@@ -18,7 +18,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ jobs:
|
||||
pull-requests: write # required to post PR review comments by the action
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
|
||||
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -7,6 +7,6 @@ func IsInitProcess() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func ForkReap(_ ...Option) error {
|
||||
return nil
|
||||
func ForkReap(_ ...Option) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
@@ -32,12 +32,13 @@ func TestReap(t *testing.T) {
|
||||
}
|
||||
|
||||
pids := make(reap.PidCh, 1)
|
||||
err := reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
// Provide some argument that immediately exits.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", "exit 0"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, exitCode)
|
||||
|
||||
cmd := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd.Start()
|
||||
@@ -65,6 +66,36 @@ func TestReap(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest
|
||||
func TestForkReapExitCodes(t *testing.T) {
|
||||
if testutil.InCI() {
|
||||
t.Skip("Detected CI, skipping reaper tests")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
command string
|
||||
expectedCode int
|
||||
}{
|
||||
{"exit 0", "exit 0", 0},
|
||||
{"exit 1", "exit 1", 1},
|
||||
{"exit 42", "exit 42", 42},
|
||||
{"exit 255", "exit 255", 255},
|
||||
{"SIGKILL", "kill -9 $$", 128 + 9},
|
||||
{"SIGTERM", "kill -15 $$", 128 + 15},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs("/bin/sh", "-c", tt.command),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedCode, exitCode, "exit code mismatch for %q", tt.command)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest // Signal handling.
|
||||
func TestReapInterrupt(t *testing.T) {
|
||||
// Don't run the reaper test in CI. It does weird
|
||||
@@ -84,13 +115,17 @@ func TestReapInterrupt(t *testing.T) {
|
||||
defer signal.Stop(usrSig)
|
||||
|
||||
go func() {
|
||||
errC <- reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
reaper.WithCatchSignals(os.Interrupt),
|
||||
// Signal propagation does not extend to children of children, so
|
||||
// we create a little bash script to ensure sleep is interrupted.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", fmt.Sprintf("pid=0; trap 'kill -USR2 %d; kill -TERM $pid' INT; sleep 10 &\npid=$!; kill -USR1 %d; wait", os.Getpid(), os.Getpid())),
|
||||
)
|
||||
// The child exits with 128 + SIGTERM (15) = 143, but the trap catches
|
||||
// SIGINT and sends SIGTERM to the sleep process, so exit code varies.
|
||||
_ = exitCode
|
||||
errC <- err
|
||||
}()
|
||||
|
||||
require.Equal(t, <-usrSig, syscall.SIGUSR1)
|
||||
|
||||
@@ -40,7 +40,10 @@ func catchSignals(pid int, sigs []os.Signal) {
|
||||
// the reaper and an exec.Command waiting for its process to complete.
|
||||
// The provided 'pids' channel may be nil if the caller does not care about the
|
||||
// reaped children PIDs.
|
||||
func ForkReap(opt ...Option) error {
|
||||
//
|
||||
// Returns the child's exit code (using 128+signal for signal termination)
|
||||
// and any error from Wait4.
|
||||
func ForkReap(opt ...Option) (int, error) {
|
||||
opts := &options{
|
||||
ExecArgs: os.Args,
|
||||
}
|
||||
@@ -53,7 +56,7 @@ func ForkReap(opt ...Option) error {
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get wd: %w", err)
|
||||
return 1, xerrors.Errorf("get wd: %w", err)
|
||||
}
|
||||
|
||||
pattrs := &syscall.ProcAttr{
|
||||
@@ -72,7 +75,7 @@ func ForkReap(opt ...Option) error {
|
||||
//#nosec G204
|
||||
pid, err := syscall.ForkExec(opts.ExecArgs[0], opts.ExecArgs, pattrs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fork exec: %w", err)
|
||||
return 1, xerrors.Errorf("fork exec: %w", err)
|
||||
}
|
||||
|
||||
go catchSignals(pid, opts.CatchSignals)
|
||||
@@ -82,5 +85,18 @@ func ForkReap(opt ...Option) error {
|
||||
for xerrors.Is(err, syscall.EINTR) {
|
||||
_, err = syscall.Wait4(pid, &wstatus, 0, nil)
|
||||
}
|
||||
return err
|
||||
|
||||
// Convert wait status to exit code using standard Unix conventions:
|
||||
// - Normal exit: use the exit code
|
||||
// - Signal termination: use 128 + signal number
|
||||
var exitCode int
|
||||
switch {
|
||||
case wstatus.Exited():
|
||||
exitCode = wstatus.ExitStatus()
|
||||
case wstatus.Signaled():
|
||||
exitCode = 128 + int(wstatus.Signal())
|
||||
default:
|
||||
exitCode = 1
|
||||
}
|
||||
return exitCode, err
|
||||
}
|
||||
|
||||
@@ -136,7 +136,7 @@ func workspaceAgent() *serpent.Command {
|
||||
// to do this else we fork bomb ourselves.
|
||||
//nolint:gocritic
|
||||
args := append(os.Args, "--no-reap")
|
||||
err := reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs(args...),
|
||||
reaper.WithCatchSignals(StopSignals...),
|
||||
)
|
||||
@@ -145,8 +145,8 @@ func workspaceAgent() *serpent.Command {
|
||||
return xerrors.Errorf("fork reap: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(ctx, "reaper process exiting")
|
||||
return nil
|
||||
logger.Info(ctx, "reaper child process exited", slog.F("exit_code", exitCode))
|
||||
return ExitError(exitCode, nil)
|
||||
}
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
|
||||
@@ -49,6 +49,9 @@ Examples:
|
||||
# Test OpenAI API through bridge
|
||||
coder scaletest bridge --mode bridge --provider openai --concurrent-users 10 --request-count 5 --num-messages 10
|
||||
|
||||
# Test OpenAI Responses API through bridge
|
||||
coder scaletest bridge --mode bridge --provider responses --concurrent-users 10 --request-count 5 --num-messages 10
|
||||
|
||||
# Test Anthropic API through bridge
|
||||
coder scaletest bridge --mode bridge --provider anthropic --concurrent-users 10 --request-count 5 --num-messages 10
|
||||
|
||||
@@ -219,9 +222,9 @@ Examples:
|
||||
{
|
||||
Flag: "provider",
|
||||
Env: "CODER_SCALETEST_BRIDGE_PROVIDER",
|
||||
Default: "openai",
|
||||
Required: true,
|
||||
Description: "API provider to use.",
|
||||
Value: serpent.EnumOf(&provider, "openai", "anthropic"),
|
||||
Value: serpent.EnumOf(&provider, "completions", "messages", "responses"),
|
||||
},
|
||||
{
|
||||
Flag: "request-count",
|
||||
|
||||
@@ -62,6 +62,7 @@ func (*RootCmd) scaletestLLMMock() *serpent.Command {
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Mock LLM API server started on %s\n", srv.APIAddress())
|
||||
_, _ = fmt.Fprintf(inv.Stdout, " OpenAI endpoint: %s/v1/chat/completions\n", srv.APIAddress())
|
||||
_, _ = fmt.Fprintf(inv.Stdout, " OpenAI responses endpoint: %s/v1/responses\n", srv.APIAddress())
|
||||
_, _ = fmt.Fprintf(inv.Stdout, " Anthropic endpoint: %s/v1/messages\n", srv.APIAddress())
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/shirou/gopsutil/v4/process"
|
||||
"github.com/spf13/afero"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
gosshagent "golang.org/x/crypto/ssh/agent"
|
||||
@@ -85,9 +84,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
|
||||
containerName string
|
||||
containerUser string
|
||||
|
||||
// Used in tests to simulate the parent exiting.
|
||||
testForcePPID int64
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Annotations: workspaceCommand,
|
||||
@@ -179,24 +175,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// When running as a ProxyCommand (stdio mode), monitor the parent process
|
||||
// and exit if it dies to avoid leaving orphaned processes. This is
|
||||
// particularly important when editors like VSCode/Cursor spawn SSH
|
||||
// connections and then crash or are killed - we don't want zombie
|
||||
// `coder ssh` processes accumulating.
|
||||
// Note: using gopsutil to check the parent process as this handles
|
||||
// windows processes as well in a standard way.
|
||||
if stdio {
|
||||
ppid := int32(os.Getppid()) // nolint:gosec
|
||||
checkParentInterval := 10 * time.Second // Arbitrary interval to not be too frequent
|
||||
if testForcePPID > 0 {
|
||||
ppid = int32(testForcePPID) // nolint:gosec
|
||||
checkParentInterval = 100 * time.Millisecond // Shorter interval for testing
|
||||
}
|
||||
ctx, cancel = watchParentContext(ctx, quartz.NewReal(), ppid, process.PidExistsWithContext, checkParentInterval)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Prevent unnecessary logs from the stdlib from messing up the TTY.
|
||||
// See: https://github.com/coder/coder/issues/13144
|
||||
log.SetOutput(io.Discard)
|
||||
@@ -797,12 +775,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
Value: serpent.BoolOf(&forceNewTunnel),
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
Flag: "test.force-ppid",
|
||||
Description: "Override the parent process ID to simulate a different parent process. ONLY USE THIS IN TESTS.",
|
||||
Value: serpent.Int64Of(&testForcePPID),
|
||||
Hidden: true,
|
||||
},
|
||||
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
|
||||
}
|
||||
return cmd
|
||||
@@ -1690,33 +1662,3 @@ func normalizeWorkspaceInput(input string) string {
|
||||
return input // Fallback
|
||||
}
|
||||
}
|
||||
|
||||
// watchParentContext returns a context that is canceled when the parent process
|
||||
// dies. It polls using the provided clock and checks if the parent is alive
|
||||
// using the provided pidExists function.
|
||||
func watchParentContext(ctx context.Context, clock quartz.Clock, originalPPID int32, pidExists func(context.Context, int32) (bool, error), interval time.Duration) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancel(ctx) // intentionally shadowed
|
||||
|
||||
go func() {
|
||||
ticker := clock.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
alive, err := pidExists(ctx, originalPPID)
|
||||
// If we get an error checking the parent process (e.g., permission
|
||||
// denied, the process is in an unknown state), we assume the parent
|
||||
// is still alive to avoid disrupting the SSH connection. We only
|
||||
// cancel when we definitively know the parent is gone (alive=false, err=nil).
|
||||
if !alive && err == nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
@@ -312,102 +312,6 @@ type fakeCloser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func TestWatchParentContext(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CancelsWhenParentDies", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
parentAlive := true
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return parentAlive, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we simulate parent death and advance the clock
|
||||
parentAlive = false
|
||||
mClock.AdvanceNext()
|
||||
|
||||
// Then: The context should be canceled
|
||||
_ = testutil.TryReceive(ctx, t, childCtx.Done())
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelWhenParentAlive", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil // Parent always alive
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance the clock several times with the parent alive
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Then: context should not be canceled
|
||||
require.NoError(t, childCtx.Err())
|
||||
})
|
||||
|
||||
t.Run("RespectsParentContext", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancelParent := context.WithCancel(context.Background())
|
||||
mClock := quartz.NewMock(t)
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// When: we cancel the parent context
|
||||
cancelParent()
|
||||
|
||||
// Then: The context should be canceled
|
||||
require.ErrorIs(t, childCtx.Err(), context.Canceled)
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelOnError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
// Simulate an error checking parent status (e.g., permission denied).
|
||||
// We should not cancel the context in this case to avoid disrupting
|
||||
// the SSH connection.
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return false, xerrors.New("permission denied")
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance clock several times
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Context should NOT be canceled since we got an error (not a definitive "not alive")
|
||||
require.NoError(t, childCtx.Err(), "context was canceled even though pidExists returned an error")
|
||||
})
|
||||
}
|
||||
|
||||
func (c *fakeCloser) Close() error {
|
||||
*c.closes = append(*c.closes, c)
|
||||
return c.err
|
||||
|
||||
@@ -1122,97 +1122,6 @@ func TestSSH(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// This test ensures that the SSH session exits when the parent process dies.
|
||||
t.Run("StdioExitOnParentDeath", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
|
||||
defer cancel()
|
||||
|
||||
// sleepStart -> agentReady -> sessionStarted -> sleepKill -> sleepDone -> cmdDone
|
||||
sleepStart := make(chan int)
|
||||
agentReady := make(chan struct{})
|
||||
sessionStarted := make(chan struct{})
|
||||
sleepKill := make(chan struct{})
|
||||
sleepDone := make(chan struct{})
|
||||
|
||||
// Start a sleep process which we will pretend is the parent.
|
||||
go func() {
|
||||
sleepCmd := exec.Command("sleep", "infinity")
|
||||
if !assert.NoError(t, sleepCmd.Start(), "failed to start sleep command") {
|
||||
return
|
||||
}
|
||||
sleepStart <- sleepCmd.Process.Pid
|
||||
defer close(sleepDone)
|
||||
<-sleepKill
|
||||
sleepCmd.Process.Kill()
|
||||
_ = sleepCmd.Wait()
|
||||
}()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
go func() {
|
||||
defer close(agentReady)
|
||||
_ = agenttest.New(t, client.URL, agentToken)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).WaitFor(coderdtest.AgentsReady)
|
||||
}()
|
||||
|
||||
clientOutput, clientInput := io.Pipe()
|
||||
serverOutput, serverInput := io.Pipe()
|
||||
defer func() {
|
||||
for _, c := range []io.Closer{clientOutput, clientInput, serverOutput, serverInput} {
|
||||
_ = c.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Start a connection to the agent once it's ready
|
||||
go func() {
|
||||
<-agentReady
|
||||
conn, channels, requests, err := ssh.NewClientConn(&testutil.ReaderWriterConn{
|
||||
Reader: serverOutput,
|
||||
Writer: clientInput,
|
||||
}, "", &ssh.ClientConfig{
|
||||
// #nosec
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
})
|
||||
if !assert.NoError(t, err, "failed to create SSH client connection") {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
sshClient := ssh.NewClient(conn, channels, requests)
|
||||
defer sshClient.Close()
|
||||
|
||||
session, err := sshClient.NewSession()
|
||||
if !assert.NoError(t, err, "failed to create SSH session") {
|
||||
return
|
||||
}
|
||||
close(sessionStarted)
|
||||
<-sleepDone
|
||||
assert.NoError(t, session.Close())
|
||||
}()
|
||||
|
||||
// Wait for our "parent" process to start
|
||||
sleepPid := testutil.RequireReceive(ctx, t, sleepStart)
|
||||
// Wait for the agent to be ready
|
||||
testutil.SoftTryReceive(ctx, t, agentReady)
|
||||
inv, root := clitest.New(t, "ssh", "--stdio", workspace.Name, "--test.force-ppid", fmt.Sprintf("%d", sleepPid))
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdin = clientOutput
|
||||
inv.Stdout = serverInput
|
||||
inv.Stderr = io.Discard
|
||||
|
||||
// Start the command
|
||||
clitest.Start(t, inv.WithContext(ctx))
|
||||
|
||||
// Wait for a session to be established
|
||||
testutil.SoftTryReceive(ctx, t, sessionStarted)
|
||||
// Now kill the fake "parent"
|
||||
close(sleepKill)
|
||||
// The sleep process should exit
|
||||
testutil.SoftTryReceive(ctx, t, sleepDone)
|
||||
// And then the command should exit. This is tracked by clitest.Start.
|
||||
})
|
||||
|
||||
t.Run("ForwardAgent", func(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Test not supported on windows")
|
||||
|
||||
@@ -775,15 +775,15 @@ aibridge:
|
||||
# Maximum number of concurrent AI Bridge requests per replica. Set to 0 to disable
|
||||
# (unlimited).
|
||||
# (default: 0, type: int)
|
||||
maxConcurrency: 0
|
||||
max_concurrency: 0
|
||||
# Maximum number of AI Bridge requests per second per replica. Set to 0 to disable
|
||||
# (unlimited).
|
||||
# (default: 0, type: int)
|
||||
rateLimit: 0
|
||||
rate_limit: 0
|
||||
# Emit structured logs for AI Bridge interception records. Use this for exporting
|
||||
# these records to external SIEM or observability systems.
|
||||
# (default: false, type: bool)
|
||||
structuredLogging: false
|
||||
structured_logging: false
|
||||
# Once enabled, extra headers will be added to upstream requests to identify the
|
||||
# user (actor) making requests to AI Bridge. This is only needed if you are using
|
||||
# a proxy between AI Bridge and an upstream AI provider. This will send
|
||||
@@ -794,20 +794,20 @@ aibridge:
|
||||
# Enable the circuit breaker to protect against cascading failures from upstream
|
||||
# AI provider rate limits (429, 503, 529 overloaded).
|
||||
# (default: false, type: bool)
|
||||
circuitBreakerEnabled: false
|
||||
circuit_breaker_enabled: false
|
||||
# Number of consecutive failures that triggers the circuit breaker to open.
|
||||
# (default: 5, type: int)
|
||||
circuitBreakerFailureThreshold: 5
|
||||
circuit_breaker_failure_threshold: 5
|
||||
# Cyclic period of the closed state for clearing internal failure counts.
|
||||
# (default: 10s, type: duration)
|
||||
circuitBreakerInterval: 10s
|
||||
circuit_breaker_interval: 10s
|
||||
# How long the circuit breaker stays open before transitioning to half-open state.
|
||||
# (default: 30s, type: duration)
|
||||
circuitBreakerTimeout: 30s
|
||||
circuit_breaker_timeout: 30s
|
||||
# Maximum number of requests allowed in half-open state before deciding to close
|
||||
# or re-open the circuit.
|
||||
# (default: 3, type: int)
|
||||
circuitBreakerMaxRequests: 3
|
||||
circuit_breaker_max_requests: 3
|
||||
aibridgeproxy:
|
||||
# Enable the AI Bridge MITM Proxy for intercepting and decrypting AI provider
|
||||
# requests.
|
||||
|
||||
@@ -91,7 +91,7 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
|
||||
Name: agentName,
|
||||
ResourceID: parentAgent.ResourceID,
|
||||
AuthToken: uuid.New(),
|
||||
AuthInstanceID: parentAgent.AuthInstanceID,
|
||||
AuthInstanceID: sql.NullString{},
|
||||
Architecture: req.Architecture,
|
||||
EnvironmentVariables: pqtype.NullRawMessage{},
|
||||
OperatingSystem: req.OperatingSystem,
|
||||
|
||||
@@ -175,6 +175,52 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Context: https://github.com/coder/coder/pull/22196
|
||||
t.Run("CreateSubAgentDoesNotInheritAuthInstanceID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
log = testutil.Logger(t)
|
||||
clock = quartz.NewMock(t)
|
||||
|
||||
db, org = newDatabaseWithOrg(t)
|
||||
user, agent = newUserWithWorkspaceAgent(t, db, org)
|
||||
)
|
||||
|
||||
// Given: The parent agent has an AuthInstanceID set
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
parentAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agent.ID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, parentAgent.AuthInstanceID.Valid, "parent agent should have an AuthInstanceID")
|
||||
require.NotEmpty(t, parentAgent.AuthInstanceID.String)
|
||||
|
||||
api := newAgentAPI(t, log, db, clock, user, org, agent)
|
||||
|
||||
// When: We create a sub agent
|
||||
createResp, err := api.CreateSubAgent(ctx, &proto.CreateSubAgentRequest{
|
||||
Name: "sub-agent",
|
||||
Directory: "/workspaces/test",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
subAgentID, err := uuid.FromBytes(createResp.Agent.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: The sub-agent must NOT re-use the parent's AuthInstanceID.
|
||||
subAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), subAgentID)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, subAgent.AuthInstanceID.Valid, "sub-agent should not have an AuthInstanceID")
|
||||
assert.Empty(t, subAgent.AuthInstanceID.String, "sub-agent AuthInstanceID string should be empty")
|
||||
|
||||
// Double-check: looking up by the parent's instance ID must
|
||||
// still return the parent, not the sub-agent.
|
||||
lookedUp, err := db.GetWorkspaceAgentByInstanceID(dbauthz.AsSystemRestricted(ctx), parentAgent.AuthInstanceID.String)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, parentAgent.ID, lookedUp.ID, "instance ID lookup should still return the parent agent")
|
||||
})
|
||||
|
||||
type expectedAppError struct {
|
||||
index int32
|
||||
field string
|
||||
|
||||
@@ -15066,10 +15066,6 @@ const docTemplate = `{
|
||||
"limit": {
|
||||
"type": "integer"
|
||||
},
|
||||
"soft_limit": {
|
||||
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
|
||||
"type": "integer"
|
||||
},
|
||||
"usage_period": {
|
||||
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
|
||||
"allOf": [
|
||||
|
||||
@@ -13623,10 +13623,6 @@
|
||||
"limit": {
|
||||
"type": "integer"
|
||||
},
|
||||
"soft_limit": {
|
||||
"description": "SoftLimit is the soft limit of the feature, and is only used for showing\nincluded limits in the dashboard. No license validation or warnings are\ngenerated from this value.",
|
||||
"type": "integer"
|
||||
},
|
||||
"usage_period": {
|
||||
"description": "UsagePeriod denotes that the usage is a counter that accumulates over\nthis period (and most likely resets with the issuance of the next\nlicense).\n\nThese dates are determined from the license that this entitlement comes\nfrom, see enterprise/coderd/license/license.go.\n\nOnly certain features set these fields:\n- FeatureManagedAgentLimit",
|
||||
"allOf": [
|
||||
|
||||
@@ -40,8 +40,10 @@
|
||||
// counters. When boundary logs are reported, Track() adds the IDs to the sets
|
||||
// and increments request counters.
|
||||
//
|
||||
// FlushToDB() writes stats to the database, replacing all values with the current
|
||||
// in-memory state. Stats accumulate in memory throughout the telemetry period.
|
||||
// FlushToDB() writes stats to the database only when there's been new activity
|
||||
// since the last flush. This prevents stale data from being written after a
|
||||
// telemetry reset when no new usage occurred. Stats accumulate in memory
|
||||
// throughout the telemetry period.
|
||||
//
|
||||
// A new period is detected when the upsert results in an INSERT (meaning
|
||||
// telemetry deleted the replica's row). At that point, all in-memory stats are
|
||||
|
||||
@@ -14,21 +14,40 @@ import (
|
||||
|
||||
// Tracker tracks boundary usage for telemetry reporting.
|
||||
//
|
||||
// All stats accumulate in memory throughout a telemetry period and are only
|
||||
// reset when a new period begins.
|
||||
// Unique user/workspace counts are tracked both cumulatively and as deltas since
|
||||
// the last flush. The delta is needed because when a new telemetry period starts
|
||||
// (the DB row is deleted), we must only insert data accumulated since the last
|
||||
// flush. If we used cumulative values, stale data from the previous period would
|
||||
// be written to the new row and then lost when subsequent updates overwrite it.
|
||||
//
|
||||
// Request counts are tracked as deltas and accumulated in the database.
|
||||
type Tracker struct {
|
||||
mu sync.Mutex
|
||||
workspaces map[uuid.UUID]struct{}
|
||||
users map[uuid.UUID]struct{}
|
||||
mu sync.Mutex
|
||||
|
||||
// Cumulative unique counts for the current period (used on UPDATE to
|
||||
// replace the DB value with accurate totals).
|
||||
workspaces map[uuid.UUID]struct{}
|
||||
users map[uuid.UUID]struct{}
|
||||
|
||||
// Delta unique counts since last flush (used on INSERT to avoid writing
|
||||
// stale data from the previous period).
|
||||
workspacesDelta map[uuid.UUID]struct{}
|
||||
usersDelta map[uuid.UUID]struct{}
|
||||
|
||||
// Request deltas (always reset when flushing, accumulated in DB).
|
||||
allowedRequests int64
|
||||
deniedRequests int64
|
||||
|
||||
usageSinceLastFlush bool
|
||||
}
|
||||
|
||||
// NewTracker creates a new boundary usage tracker.
|
||||
func NewTracker() *Tracker {
|
||||
return &Tracker{
|
||||
workspaces: make(map[uuid.UUID]struct{}),
|
||||
users: make(map[uuid.UUID]struct{}),
|
||||
workspaces: make(map[uuid.UUID]struct{}),
|
||||
users: make(map[uuid.UUID]struct{}),
|
||||
workspacesDelta: make(map[uuid.UUID]struct{}),
|
||||
usersDelta: make(map[uuid.UUID]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,50 +58,68 @@ func (t *Tracker) Track(workspaceID, ownerID uuid.UUID, allowed, denied int64) {
|
||||
|
||||
t.workspaces[workspaceID] = struct{}{}
|
||||
t.users[ownerID] = struct{}{}
|
||||
t.workspacesDelta[workspaceID] = struct{}{}
|
||||
t.usersDelta[ownerID] = struct{}{}
|
||||
t.allowedRequests += allowed
|
||||
t.deniedRequests += denied
|
||||
t.usageSinceLastFlush = true
|
||||
}
|
||||
|
||||
// FlushToDB writes the accumulated stats to the database. All values are
|
||||
// replaced in the database (they represent the current in-memory state). If the
|
||||
// database row was deleted (new telemetry period), all in-memory stats are reset.
|
||||
// FlushToDB writes stats to the database. For unique counts, cumulative values
|
||||
// are used on UPDATE (replacing the DB value) while delta values are used on
|
||||
// INSERT (starting fresh). Request counts are always deltas, accumulated in DB.
|
||||
// All deltas are reset immediately after snapshot so Track() calls during the
|
||||
// DB operation are preserved for the next flush.
|
||||
func (t *Tracker) FlushToDB(ctx context.Context, db database.Store, replicaID uuid.UUID) error {
|
||||
t.mu.Lock()
|
||||
workspaceCount := int64(len(t.workspaces))
|
||||
userCount := int64(len(t.users))
|
||||
allowed := t.allowedRequests
|
||||
denied := t.deniedRequests
|
||||
t.mu.Unlock()
|
||||
|
||||
// Don't flush if there's no activity.
|
||||
if workspaceCount == 0 && userCount == 0 && allowed == 0 && denied == 0 {
|
||||
if !t.usageSinceLastFlush {
|
||||
t.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshot all values.
|
||||
workspaceCount := int64(len(t.workspaces)) // cumulative, for UPDATE
|
||||
userCount := int64(len(t.users)) // cumulative, for UPDATE
|
||||
workspaceDelta := int64(len(t.workspacesDelta)) // delta, for INSERT
|
||||
userDelta := int64(len(t.usersDelta)) // delta, for INSERT
|
||||
allowed := t.allowedRequests // delta, accumulated in DB
|
||||
denied := t.deniedRequests // delta, accumulated in DB
|
||||
|
||||
// Reset all deltas immediately so Track() calls during the DB operation
|
||||
// below are preserved for the next flush.
|
||||
t.workspacesDelta = make(map[uuid.UUID]struct{})
|
||||
t.usersDelta = make(map[uuid.UUID]struct{})
|
||||
t.allowedRequests = 0
|
||||
t.deniedRequests = 0
|
||||
t.usageSinceLastFlush = false
|
||||
t.mu.Unlock()
|
||||
|
||||
//nolint:gocritic // This is the actual package doing boundary usage tracking.
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
|
||||
_, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount,
|
||||
UniqueUsersCount: userCount,
|
||||
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
|
||||
UniqueUsersCount: userCount, // cumulative, for UPDATE
|
||||
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
|
||||
UniqueUsersDelta: userDelta, // delta, for INSERT
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If this was an insert (new period), reset all stats. Any Track() calls
|
||||
// that occurred during the DB operation will be counted in the next period.
|
||||
if newPeriod {
|
||||
t.mu.Lock()
|
||||
t.workspaces = make(map[uuid.UUID]struct{})
|
||||
t.users = make(map[uuid.UUID]struct{})
|
||||
t.allowedRequests = 0
|
||||
t.deniedRequests = 0
|
||||
t.mu.Unlock()
|
||||
// Always reset cumulative counts to prevent unbounded memory growth (e.g.
|
||||
// if the DB is unreachable). Copy delta maps to preserve any Track() calls
|
||||
// that occurred during the DB operation above.
|
||||
t.mu.Lock()
|
||||
t.workspaces = make(map[uuid.UUID]struct{})
|
||||
t.users = make(map[uuid.UUID]struct{})
|
||||
for id := range t.workspacesDelta {
|
||||
t.workspaces[id] = struct{}{}
|
||||
}
|
||||
for id := range t.usersDelta {
|
||||
t.users[id] = struct{}{}
|
||||
}
|
||||
t.mu.Unlock()
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// StartFlushLoop begins the periodic flush loop that writes accumulated stats
|
||||
|
||||
@@ -159,23 +159,18 @@ func TestTracker_FlushToDB_Accumulates(t *testing.T) {
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
// First flush is an insert, resets unique counts (new period).
|
||||
tracker.Track(workspaceID, ownerID, 5, 3)
|
||||
|
||||
// First flush is an insert, which resets in-memory stats.
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track more data after the reset.
|
||||
// Track & flush more data. Same workspace/user, so unique counts stay at 1.
|
||||
tracker.Track(workspaceID, ownerID, 2, 1)
|
||||
|
||||
// Second flush is an update so stats should accumulate.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track even more data.
|
||||
// Track & flush even more data to continue accumulation.
|
||||
tracker.Track(workspaceID, ownerID, 3, 2)
|
||||
|
||||
// Third flush stats should continue accumulating.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -184,8 +179,8 @@ func TestTracker_FlushToDB_Accumulates(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
require.Equal(t, int64(5), summary.AllowedRequests, "should accumulate after first reset: 2+3=5")
|
||||
require.Equal(t, int64(3), summary.DeniedRequests, "should accumulate after first reset: 1+2=3")
|
||||
require.Equal(t, int64(5+2+3), summary.AllowedRequests)
|
||||
require.Equal(t, int64(3+1+2), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
@@ -256,15 +251,24 @@ func TestUpsertBoundaryUsageStats_Insert(t *testing.T) {
|
||||
|
||||
replicaID := uuid.New()
|
||||
|
||||
// Set different values for delta vs cumulative to verify INSERT uses delta.
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 5,
|
||||
UniqueUsersCount: 3,
|
||||
UniqueWorkspacesDelta: 5,
|
||||
UniqueUsersDelta: 3,
|
||||
UniqueWorkspacesCount: 999, // should be ignored on INSERT
|
||||
UniqueUsersCount: 999, // should be ignored on INSERT
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, newPeriod, "should return true for insert")
|
||||
|
||||
// Verify INSERT used the delta values, not cumulative.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(5), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(3), summary.UniqueUsers)
|
||||
}
|
||||
|
||||
func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
@@ -275,34 +279,34 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
|
||||
replicaID := uuid.New()
|
||||
|
||||
// First insert.
|
||||
// First insert uses delta fields.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 5,
|
||||
UniqueUsersCount: 3,
|
||||
UniqueWorkspacesDelta: 5,
|
||||
UniqueUsersDelta: 3,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second upsert (update).
|
||||
// Second upsert (update). Set different delta vs cumulative to verify UPDATE uses cumulative.
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 8,
|
||||
UniqueUsersCount: 5,
|
||||
UniqueWorkspacesCount: 8, // cumulative, should be used
|
||||
UniqueUsersCount: 5, // cumulative, should be used
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.False(t, newPeriod, "should return false for update")
|
||||
|
||||
// Verify the update took effect.
|
||||
// Verify UPDATE used cumulative values.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(8), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(5), summary.UniqueUsers)
|
||||
require.Equal(t, int64(200), summary.AllowedRequests)
|
||||
require.Equal(t, int64(20), summary.DeniedRequests)
|
||||
require.Equal(t, int64(100+200), summary.AllowedRequests)
|
||||
require.Equal(t, int64(10+20), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
@@ -315,11 +319,11 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
replica2 := uuid.New()
|
||||
replica3 := uuid.New()
|
||||
|
||||
// Insert stats for 3 replicas.
|
||||
// Insert stats for 3 replicas. Delta fields are used for INSERT.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesCount: 10,
|
||||
UniqueUsersCount: 5,
|
||||
UniqueWorkspacesDelta: 10,
|
||||
UniqueUsersDelta: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
@@ -327,8 +331,8 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesCount: 15,
|
||||
UniqueUsersCount: 8,
|
||||
UniqueWorkspacesDelta: 15,
|
||||
UniqueUsersDelta: 8,
|
||||
AllowedRequests: 150,
|
||||
DeniedRequests: 15,
|
||||
})
|
||||
@@ -336,8 +340,8 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica3,
|
||||
UniqueWorkspacesCount: 20,
|
||||
UniqueUsersCount: 12,
|
||||
UniqueWorkspacesDelta: 20,
|
||||
UniqueUsersDelta: 12,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
@@ -375,12 +379,12 @@ func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
// Insert stats for multiple replicas.
|
||||
// Insert stats for multiple replicas. Delta fields are used for INSERT.
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: uuid.New(),
|
||||
UniqueWorkspacesCount: int64(i + 1),
|
||||
UniqueUsersCount: int64(i + 1),
|
||||
UniqueWorkspacesDelta: int64(i + 1),
|
||||
UniqueUsersDelta: int64(i + 1),
|
||||
AllowedRequests: int64((i + 1) * 10),
|
||||
DeniedRequests: int64(i + 1),
|
||||
})
|
||||
@@ -412,11 +416,11 @@ func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
|
||||
// Insert stats for 2 replicas.
|
||||
// Insert stats for 2 replicas. Delta fields are used for INSERT.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesCount: 10,
|
||||
UniqueUsersCount: 5,
|
||||
UniqueWorkspacesDelta: 10,
|
||||
UniqueUsersDelta: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
@@ -424,8 +428,8 @@ func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesCount: 20,
|
||||
UniqueUsersCount: 10,
|
||||
UniqueWorkspacesDelta: 20,
|
||||
UniqueUsersDelta: 10,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
@@ -497,6 +501,49 @@ func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
require.Equal(t, int64(1), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_FlushToDB_NoStaleDataAfterReset(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
// Track some data, flush, and verify.
|
||||
tracker.Track(workspaceID, ownerID, 10, 5)
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
require.NoError(t, err)
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
|
||||
// Flush again without any new Track() calls. This should not write stale
|
||||
// data back to the DB.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Summary should be empty (no stale data written).
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.UniqueUsers)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
require.Equal(t, int64(0), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -540,3 +587,57 @@ func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
|
||||
require.GreaterOrEqual(t, summary.AllowedRequests, int64(0))
|
||||
require.GreaterOrEqual(t, summary.DeniedRequests, int64(0))
|
||||
}
|
||||
|
||||
// trackDuringUpsertDB wraps a database.Store to call Track() during the
|
||||
// UpsertBoundaryUsageStats operation, simulating a concurrent Track() call.
|
||||
type trackDuringUpsertDB struct {
|
||||
database.Store
|
||||
tracker *boundaryusage.Tracker
|
||||
workspaceID uuid.UUID
|
||||
userID uuid.UUID
|
||||
}
|
||||
|
||||
func (s *trackDuringUpsertDB) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
s.tracker.Track(s.workspaceID, s.userID, 20, 10)
|
||||
return s.Store.UpsertBoundaryUsageStats(ctx, arg)
|
||||
}
|
||||
|
||||
func TestTracker_TrackDuringFlush(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
|
||||
// Track some initial data.
|
||||
tracker.Track(uuid.New(), uuid.New(), 10, 5)
|
||||
|
||||
trackingDB := &trackDuringUpsertDB{
|
||||
Store: db,
|
||||
tracker: tracker,
|
||||
workspaceID: uuid.New(),
|
||||
userID: uuid.New(),
|
||||
}
|
||||
|
||||
// Flush will call Track() during the DB operation.
|
||||
err := tracker.FlushToDB(ctx, trackingDB, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify first flush only wrote the initial data.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// The second flush should include the Track() call that happened during the
|
||||
// first flush's DB operation.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10+20), summary.AllowedRequests)
|
||||
require.Equal(t, int64(5+10), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
@@ -106,6 +106,8 @@ import (
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
const DefaultDERPMeshKey = "test-key"
|
||||
|
||||
const defaultTestDaemonName = "test-daemon"
|
||||
|
||||
type Options struct {
|
||||
@@ -510,8 +512,18 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
stunAddresses = options.DeploymentValues.DERP.Server.STUNAddresses.Value()
|
||||
}
|
||||
|
||||
derpServer := derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp").Leveled(slog.LevelDebug)))
|
||||
derpServer.SetMeshKey("test-key")
|
||||
const derpMeshKey = "test-key"
|
||||
// Technically AGPL coderd servers don't set this value, but it doesn't
|
||||
// change any behavior. It's useful for enterprise tests.
|
||||
err = options.Database.InsertDERPMeshKey(dbauthz.AsSystemRestricted(ctx), derpMeshKey) //nolint:gocritic // test
|
||||
if !database.IsUniqueViolation(err, database.UniqueSiteConfigsKeyKey) {
|
||||
require.NoError(t, err, "insert DERP mesh key")
|
||||
}
|
||||
var derpServer *derp.Server
|
||||
if options.DeploymentValues.DERP.Server.Enable.Value() {
|
||||
derpServer = derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp").Leveled(slog.LevelDebug)))
|
||||
derpServer.SetMeshKey(derpMeshKey)
|
||||
}
|
||||
|
||||
// match default with cli default
|
||||
if options.SSHKeygenAlgorithm == "" {
|
||||
|
||||
@@ -762,9 +762,10 @@ type sqlcQuerier interface {
|
||||
UpsertAnnouncementBanners(ctx context.Context, value string) error
|
||||
UpsertAppSecurityKey(ctx context.Context, value string) error
|
||||
UpsertApplicationName(ctx context.Context, value string) error
|
||||
// Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
// the current in-memory state. Returns true if this was an insert (new period),
|
||||
// false if update.
|
||||
// Upserts boundary usage statistics for a replica. On INSERT (new period), uses
|
||||
// delta values for unique counts (only data since last flush). On UPDATE, uses
|
||||
// cumulative values for unique counts (accurate period totals). Request counts
|
||||
// are always deltas, accumulated in DB. Returns true if insert, false if update.
|
||||
UpsertBoundaryUsageStats(ctx context.Context, arg UpsertBoundaryUsageStatsParams) (bool, error)
|
||||
UpsertConnectionLog(ctx context.Context, arg UpsertConnectionLogParams) (ConnectionLog, error)
|
||||
UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error
|
||||
|
||||
@@ -6271,6 +6271,56 @@ func TestGetWorkspaceAgentsByParentID(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetWorkspaceAgentByInstanceID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Context: https://github.com/coder/coder/pull/22196
|
||||
t.Run("DoesNotReturnSubAgents", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A parent workspace agent with an AuthInstanceID and a
|
||||
// sub-agent that shares the same AuthInstanceID.
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
|
||||
Type: database.ProvisionerJobTypeTemplateVersionImport,
|
||||
OrganizationID: org.ID,
|
||||
})
|
||||
resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
|
||||
JobID: job.ID,
|
||||
})
|
||||
|
||||
authInstanceID := fmt.Sprintf("instance-%s-%d", t.Name(), time.Now().UnixNano())
|
||||
parentAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ResourceID: resource.ID,
|
||||
AuthInstanceID: sql.NullString{
|
||||
String: authInstanceID,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
// Create a sub-agent with the same AuthInstanceID (simulating
|
||||
// the old behavior before the fix).
|
||||
_ = dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{UUID: parentAgent.ID, Valid: true},
|
||||
ResourceID: resource.ID,
|
||||
AuthInstanceID: sql.NullString{
|
||||
String: authInstanceID,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// When: We look up the agent by instance ID.
|
||||
agent, err := db.GetWorkspaceAgentByInstanceID(ctx, authInstanceID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: The result must be the parent agent, not the sub-agent.
|
||||
assert.Equal(t, parentAgent.ID, agent.ID, "instance ID lookup should return the parent agent, not a sub-agent")
|
||||
assert.False(t, agent.ParentID.Valid, "returned agent should not have a parent (should be the parent itself)")
|
||||
})
|
||||
}
|
||||
|
||||
func requireUsersMatch(t testing.TB, expected []database.User, found []database.GetUsersRow, msg string) {
|
||||
t.Helper()
|
||||
require.ElementsMatch(t, expected, database.ConvertUserRows(found), msg)
|
||||
|
||||
@@ -2051,32 +2051,37 @@ INSERT INTO boundary_usage_stats (
|
||||
NOW(),
|
||||
NOW()
|
||||
) ON CONFLICT (replica_id) DO UPDATE SET
|
||||
unique_workspaces_count = EXCLUDED.unique_workspaces_count,
|
||||
unique_users_count = EXCLUDED.unique_users_count,
|
||||
allowed_requests = EXCLUDED.allowed_requests,
|
||||
denied_requests = EXCLUDED.denied_requests,
|
||||
unique_workspaces_count = $6,
|
||||
unique_users_count = $7,
|
||||
allowed_requests = boundary_usage_stats.allowed_requests + EXCLUDED.allowed_requests,
|
||||
denied_requests = boundary_usage_stats.denied_requests + EXCLUDED.denied_requests,
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS new_period
|
||||
`
|
||||
|
||||
type UpsertBoundaryUsageStatsParams struct {
|
||||
ReplicaID uuid.UUID `db:"replica_id" json:"replica_id"`
|
||||
UniqueWorkspacesCount int64 `db:"unique_workspaces_count" json:"unique_workspaces_count"`
|
||||
UniqueUsersCount int64 `db:"unique_users_count" json:"unique_users_count"`
|
||||
UniqueWorkspacesDelta int64 `db:"unique_workspaces_delta" json:"unique_workspaces_delta"`
|
||||
UniqueUsersDelta int64 `db:"unique_users_delta" json:"unique_users_delta"`
|
||||
AllowedRequests int64 `db:"allowed_requests" json:"allowed_requests"`
|
||||
DeniedRequests int64 `db:"denied_requests" json:"denied_requests"`
|
||||
UniqueWorkspacesCount int64 `db:"unique_workspaces_count" json:"unique_workspaces_count"`
|
||||
UniqueUsersCount int64 `db:"unique_users_count" json:"unique_users_count"`
|
||||
}
|
||||
|
||||
// Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
// the current in-memory state. Returns true if this was an insert (new period),
|
||||
// false if update.
|
||||
// Upserts boundary usage statistics for a replica. On INSERT (new period), uses
|
||||
// delta values for unique counts (only data since last flush). On UPDATE, uses
|
||||
// cumulative values for unique counts (accurate period totals). Request counts
|
||||
// are always deltas, accumulated in DB. Returns true if insert, false if update.
|
||||
func (q *sqlQuerier) UpsertBoundaryUsageStats(ctx context.Context, arg UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
row := q.db.QueryRowContext(ctx, upsertBoundaryUsageStats,
|
||||
arg.ReplicaID,
|
||||
arg.UniqueWorkspacesCount,
|
||||
arg.UniqueUsersCount,
|
||||
arg.UniqueWorkspacesDelta,
|
||||
arg.UniqueUsersDelta,
|
||||
arg.AllowedRequests,
|
||||
arg.DeniedRequests,
|
||||
arg.UniqueWorkspacesCount,
|
||||
arg.UniqueUsersCount,
|
||||
)
|
||||
var new_period bool
|
||||
err := row.Scan(&new_period)
|
||||
@@ -18221,6 +18226,8 @@ WHERE
|
||||
auth_instance_id = $1 :: TEXT
|
||||
-- Filter out deleted sub agents.
|
||||
AND deleted = FALSE
|
||||
-- Filter out sub agents, they do not authenticate with auth_instance_id.
|
||||
AND parent_id IS NULL
|
||||
ORDER BY
|
||||
created_at DESC
|
||||
`
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
-- name: UpsertBoundaryUsageStats :one
|
||||
-- Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
-- the current in-memory state. Returns true if this was an insert (new period),
|
||||
-- false if update.
|
||||
-- Upserts boundary usage statistics for a replica. On INSERT (new period), uses
|
||||
-- delta values for unique counts (only data since last flush). On UPDATE, uses
|
||||
-- cumulative values for unique counts (accurate period totals). Request counts
|
||||
-- are always deltas, accumulated in DB. Returns true if insert, false if update.
|
||||
INSERT INTO boundary_usage_stats (
|
||||
replica_id,
|
||||
unique_workspaces_count,
|
||||
@@ -12,17 +13,17 @@ INSERT INTO boundary_usage_stats (
|
||||
updated_at
|
||||
) VALUES (
|
||||
@replica_id,
|
||||
@unique_workspaces_count,
|
||||
@unique_users_count,
|
||||
@unique_workspaces_delta,
|
||||
@unique_users_delta,
|
||||
@allowed_requests,
|
||||
@denied_requests,
|
||||
NOW(),
|
||||
NOW()
|
||||
) ON CONFLICT (replica_id) DO UPDATE SET
|
||||
unique_workspaces_count = EXCLUDED.unique_workspaces_count,
|
||||
unique_users_count = EXCLUDED.unique_users_count,
|
||||
allowed_requests = EXCLUDED.allowed_requests,
|
||||
denied_requests = EXCLUDED.denied_requests,
|
||||
unique_workspaces_count = @unique_workspaces_count,
|
||||
unique_users_count = @unique_users_count,
|
||||
allowed_requests = boundary_usage_stats.allowed_requests + EXCLUDED.allowed_requests,
|
||||
denied_requests = boundary_usage_stats.denied_requests + EXCLUDED.denied_requests,
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS new_period;
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@ WHERE
|
||||
auth_instance_id = @auth_instance_id :: TEXT
|
||||
-- Filter out deleted sub agents.
|
||||
AND deleted = FALSE
|
||||
-- Filter out sub agents, they do not authenticate with auth_instance_id.
|
||||
AND parent_id IS NULL
|
||||
ORDER BY
|
||||
created_at DESC;
|
||||
|
||||
|
||||
@@ -238,9 +238,18 @@ func (api *API) paginatedMembers(rw http.ResponseWriter, r *http.Request) {
|
||||
memberRows = append(memberRows, row)
|
||||
}
|
||||
|
||||
if len(paginatedMemberRows) == 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.PaginatedMembersResponse{
|
||||
Members: []codersdk.OrganizationMemberWithUserData{},
|
||||
Count: 0,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
members, err := convertOrganizationMembersWithUserData(ctx, api.Database, memberRows)
|
||||
if err != nil {
|
||||
httpapi.InternalServerError(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp := codersdk.PaginatedMembersResponse{
|
||||
|
||||
@@ -65,6 +65,7 @@ type StateSnapshotter interface {
|
||||
type Claimer interface {
|
||||
Claim(
|
||||
ctx context.Context,
|
||||
store database.Store,
|
||||
now time.Time,
|
||||
userID uuid.UUID,
|
||||
name string,
|
||||
|
||||
@@ -34,7 +34,7 @@ var DefaultReconciler ReconciliationOrchestrator = NoopReconciler{}
|
||||
|
||||
type NoopClaimer struct{}
|
||||
|
||||
func (NoopClaimer) Claim(context.Context, time.Time, uuid.UUID, string, uuid.UUID, sql.NullString, sql.NullTime, sql.NullInt64) (*uuid.UUID, error) {
|
||||
func (NoopClaimer) Claim(context.Context, database.Store, time.Time, uuid.UUID, string, uuid.UUID, sql.NullString, sql.NullTime, sql.NullInt64) (*uuid.UUID, error) {
|
||||
// Not entitled to claim prebuilds in AGPL version.
|
||||
return nil, ErrAGPLDoesNotSupportPrebuiltWorkspaces
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ func generateFromPrompt(prompt string) (TaskName, error) {
|
||||
// Ensure display name is never empty
|
||||
displayName = strings.ReplaceAll(name, "-", " ")
|
||||
}
|
||||
displayName = strings.ToUpper(displayName[:1]) + displayName[1:]
|
||||
displayName = strutil.Capitalize(displayName)
|
||||
|
||||
return TaskName{
|
||||
Name: taskName,
|
||||
@@ -269,7 +269,7 @@ func generateFromAnthropic(ctx context.Context, prompt string, apiKey string, mo
|
||||
// Ensure display name is never empty
|
||||
displayName = strings.ReplaceAll(taskNameResponse.Name, "-", " ")
|
||||
}
|
||||
displayName = strings.ToUpper(displayName[:1]) + displayName[1:]
|
||||
displayName = strutil.Capitalize(displayName)
|
||||
|
||||
return TaskName{
|
||||
Name: name,
|
||||
|
||||
@@ -49,6 +49,19 @@ func TestGenerate(t *testing.T) {
|
||||
require.NotEmpty(t, taskName.DisplayName)
|
||||
})
|
||||
|
||||
t.Run("FromPromptMultiByte", func(t *testing.T) {
|
||||
t.Setenv("ANTHROPIC_API_KEY", "")
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
taskName := taskname.Generate(ctx, testutil.Logger(t), "über cool feature")
|
||||
|
||||
require.NoError(t, codersdk.NameValid(taskName.Name))
|
||||
require.True(t, len(taskName.DisplayName) > 0)
|
||||
// The display name must start with "Ü", not corrupted bytes.
|
||||
require.Equal(t, "Über cool feature", taskName.DisplayName)
|
||||
})
|
||||
|
||||
t.Run("Fallback", func(t *testing.T) {
|
||||
// Ensure no API key
|
||||
t.Setenv("ANTHROPIC_API_KEY", "")
|
||||
|
||||
@@ -43,6 +43,8 @@ const (
|
||||
// VersionHeader is sent in every telemetry request to
|
||||
// report the semantic version of Coder.
|
||||
VersionHeader = "X-Coder-Version"
|
||||
|
||||
DefaultSnapshotFrequency = 30 * time.Minute
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
@@ -71,8 +73,7 @@ func New(options Options) (Reporter, error) {
|
||||
options.Clock = quartz.NewReal()
|
||||
}
|
||||
if options.SnapshotFrequency == 0 {
|
||||
// Report once every 30mins by default!
|
||||
options.SnapshotFrequency = 30 * time.Minute
|
||||
options.SnapshotFrequency = DefaultSnapshotFrequency
|
||||
}
|
||||
snapshotURL, err := options.URL.Parse("/snapshot")
|
||||
if err != nil {
|
||||
@@ -881,16 +882,20 @@ func (r *remoteReporter) collectBoundaryUsageSummary(ctx context.Context) (*Boun
|
||||
}
|
||||
|
||||
// Reset stats after capturing the summary. This deletes all rows so each
|
||||
// replica will detect a new period on their next flush.
|
||||
// replica will detect a new period on their next flush. Note: there is a
|
||||
// known race condition here that may result in a small telemetry inaccuracy
|
||||
// with multiple replicas (https://github.com/coder/coder/issues/21770).
|
||||
if err := r.options.Database.ResetBoundaryUsageStats(boundaryCtx); err != nil {
|
||||
return nil, xerrors.Errorf("reset boundary usage stats: %w", err)
|
||||
}
|
||||
|
||||
return &BoundaryUsageSummary{
|
||||
UniqueWorkspaces: summary.UniqueWorkspaces,
|
||||
UniqueUsers: summary.UniqueUsers,
|
||||
AllowedRequests: summary.AllowedRequests,
|
||||
DeniedRequests: summary.DeniedRequests,
|
||||
UniqueWorkspaces: summary.UniqueWorkspaces,
|
||||
UniqueUsers: summary.UniqueUsers,
|
||||
AllowedRequests: summary.AllowedRequests,
|
||||
DeniedRequests: summary.DeniedRequests,
|
||||
PeriodStart: now.Add(-r.options.SnapshotFrequency),
|
||||
PeriodDurationMilliseconds: r.options.SnapshotFrequency.Milliseconds(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -2054,12 +2059,29 @@ type AIBridgeInterceptionsSummary struct {
|
||||
}
|
||||
|
||||
// BoundaryUsageSummary contains aggregated boundary usage statistics across all
|
||||
// replicas for the telemetry period.
|
||||
// replicas for the telemetry period. See the boundaryusage package documentation
|
||||
// for the full tracking architecture.
|
||||
type BoundaryUsageSummary struct {
|
||||
UniqueWorkspaces int64 `json:"unique_workspaces"`
|
||||
UniqueUsers int64 `json:"unique_users"`
|
||||
AllowedRequests int64 `json:"allowed_requests"`
|
||||
DeniedRequests int64 `json:"denied_requests"`
|
||||
|
||||
// PeriodStart and PeriodDurationMilliseconds describe the approximate collection
|
||||
// window. The actual data may not align *exactly* to these boundaries because:
|
||||
//
|
||||
// - Each replica flushes to the database independently on its own schedule
|
||||
// - The summary captures "data flushed since last reset" rather than "usage
|
||||
// during exactly the stated interval"
|
||||
// - Unflushed in-memory data at snapshot time rolls into the next period
|
||||
//
|
||||
// This is adequate for our purposes of gathering general usage and trends.
|
||||
//
|
||||
// PeriodStart is the approximate start of the collection period.
|
||||
PeriodStart time.Time `json:"period_start"`
|
||||
// PeriodDurationMilliseconds is the expected duration of the collection
|
||||
// period (the telemetry snapshot frequency).
|
||||
PeriodDurationMilliseconds int64 `json:"period_duration_ms"`
|
||||
}
|
||||
|
||||
func ConvertAIBridgeInterceptionsSummary(endTime time.Time, provider, model, client string, summary database.CalculateAIBridgeInterceptionsTelemetrySummaryRow) AIBridgeInterceptionsSummary {
|
||||
|
||||
@@ -880,6 +880,8 @@ func TestTelemetry_BoundaryUsageSummary(t *testing.T) {
|
||||
require.Equal(t, int64(2), snapshot.BoundaryUsageSummary.UniqueUsers)
|
||||
require.Equal(t, int64(10+5+3), snapshot.BoundaryUsageSummary.AllowedRequests)
|
||||
require.Equal(t, int64(2+1+0), snapshot.BoundaryUsageSummary.DeniedRequests)
|
||||
require.Equal(t, clock.Now().Add(-telemetry.DefaultSnapshotFrequency), snapshot.BoundaryUsageSummary.PeriodStart)
|
||||
require.Equal(t, int64(telemetry.DefaultSnapshotFrequency/time.Millisecond), snapshot.BoundaryUsageSummary.PeriodDurationMilliseconds)
|
||||
})
|
||||
|
||||
t.Run("ResetAfterCollection", func(t *testing.T) {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/acarl005/stripansi"
|
||||
"github.com/microcosm-cc/bluemonday"
|
||||
@@ -53,7 +54,7 @@ const (
|
||||
TruncateWithFullWords TruncateOption = 1 << 1
|
||||
)
|
||||
|
||||
// Truncate truncates s to n characters.
|
||||
// Truncate truncates s to n runes.
|
||||
// Additional behaviors can be specified using TruncateOptions.
|
||||
func Truncate(s string, n int, opts ...TruncateOption) string {
|
||||
var options TruncateOption
|
||||
@@ -63,7 +64,8 @@ func Truncate(s string, n int, opts ...TruncateOption) string {
|
||||
if n < 1 {
|
||||
return ""
|
||||
}
|
||||
if len(s) <= n {
|
||||
runes := []rune(s)
|
||||
if len(runes) <= n {
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -72,18 +74,18 @@ func Truncate(s string, n int, opts ...TruncateOption) string {
|
||||
maxLen--
|
||||
}
|
||||
var sb strings.Builder
|
||||
// If we need to truncate to full words, find the last word boundary before n.
|
||||
if options&TruncateWithFullWords != 0 {
|
||||
lastWordBoundary := strings.LastIndexFunc(s[:maxLen], unicode.IsSpace)
|
||||
// Convert the rune-safe prefix to a string, then find
|
||||
// the last word boundary (byte offset within that prefix).
|
||||
truncated := string(runes[:maxLen])
|
||||
lastWordBoundary := strings.LastIndexFunc(truncated, unicode.IsSpace)
|
||||
if lastWordBoundary < 0 {
|
||||
// We cannot find a word boundary. At this point, we'll truncate the string.
|
||||
// It's better than nothing.
|
||||
_, _ = sb.WriteString(s[:maxLen])
|
||||
} else { // lastWordBoundary <= maxLen
|
||||
_, _ = sb.WriteString(s[:lastWordBoundary])
|
||||
_, _ = sb.WriteString(truncated)
|
||||
} else {
|
||||
_, _ = sb.WriteString(truncated[:lastWordBoundary])
|
||||
}
|
||||
} else {
|
||||
_, _ = sb.WriteString(s[:maxLen])
|
||||
_, _ = sb.WriteString(string(runes[:maxLen]))
|
||||
}
|
||||
|
||||
if options&TruncateWithEllipsis != 0 {
|
||||
@@ -126,3 +128,13 @@ func UISanitize(in string) string {
|
||||
}
|
||||
return strings.TrimSpace(b.String())
|
||||
}
|
||||
|
||||
// Capitalize returns s with its first rune upper-cased. It is safe for
|
||||
// multi-byte UTF-8 characters, unlike naive byte-slicing approaches.
|
||||
func Capitalize(s string) string {
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if size == 0 {
|
||||
return s
|
||||
}
|
||||
return string(unicode.ToUpper(r)) + s[size:]
|
||||
}
|
||||
|
||||
@@ -57,6 +57,17 @@ func TestTruncate(t *testing.T) {
|
||||
{"foo bar", 1, "…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
|
||||
{"foo bar", 0, "", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
|
||||
{"This is a very long task prompt that should be truncated to 160 characters. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", 160, "This is a very long task prompt that should be truncated to 160 characters. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
|
||||
// Multi-byte rune handling.
|
||||
{"日本語テスト", 3, "日本語", nil},
|
||||
{"日本語テスト", 4, "日本語テ", nil},
|
||||
{"日本語テスト", 6, "日本語テスト", nil},
|
||||
{"日本語テスト", 4, "日本語…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
|
||||
{"🎉🎊🎈🎁", 2, "🎉🎊", nil},
|
||||
{"🎉🎊🎈🎁", 3, "🎉🎊…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
|
||||
// Multi-byte with full-word truncation.
|
||||
{"hello 日本語", 7, "hello…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
|
||||
{"hello 日本語", 8, "hello 日…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
|
||||
{"日本語 テスト", 4, "日本語", []strings.TruncateOption{strings.TruncateWithFullWords}},
|
||||
} {
|
||||
tName := fmt.Sprintf("%s_%d", tt.s, tt.n)
|
||||
for _, opt := range tt.options {
|
||||
@@ -107,3 +118,24 @@ func TestUISanitize(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCapitalize(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"", ""},
|
||||
{"hello", "Hello"},
|
||||
{"über", "Über"},
|
||||
{"Hello", "Hello"},
|
||||
{"a", "A"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("%q", tt.input), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.Equal(t, tt.expected, strings.Capitalize(tt.input))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -959,7 +959,7 @@ func claimPrebuild(
|
||||
nextStartAt sql.NullTime,
|
||||
ttl sql.NullInt64,
|
||||
) (*database.Workspace, error) {
|
||||
claimedID, err := claimer.Claim(ctx, now, owner.ID, name, templateVersionPresetID, autostartSchedule, nextStartAt, ttl)
|
||||
claimedID, err := claimer.Claim(ctx, db, now, owner.ID, name, templateVersionPresetID, autostartSchedule, nextStartAt, ttl)
|
||||
if err != nil {
|
||||
// TODO: enhance this by clarifying whether this *specific* prebuild failed or whether there are none to claim.
|
||||
return nil, xerrors.Errorf("claim prebuild: %w", err)
|
||||
@@ -2353,6 +2353,17 @@ func (api *API) patchWorkspaceACL(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Don't allow adding new groups or users to a workspace associated with a
|
||||
// task. Sharing a task workspace without sharing the task itself is a broken
|
||||
// half measure that we don't want to support right now. To be fixed!
|
||||
if workspace.TaskID.Valid {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Task workspaces cannot be shared.",
|
||||
Detail: "This workspace is managed by a task. Task sharing has not yet been implemented.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
validErrs := acl.Validate(ctx, api.Database, WorkspaceACLUpdateValidator(req))
|
||||
if len(validErrs) > 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
|
||||
@@ -372,10 +372,6 @@ type Feature struct {
|
||||
|
||||
// Below is only for features that use usage periods.
|
||||
|
||||
// SoftLimit is the soft limit of the feature, and is only used for showing
|
||||
// included limits in the dashboard. No license validation or warnings are
|
||||
// generated from this value.
|
||||
SoftLimit *int64 `json:"soft_limit,omitempty"`
|
||||
// UsagePeriod denotes that the usage is a counter that accumulates over
|
||||
// this period (and most likely resets with the issuance of the next
|
||||
// license).
|
||||
@@ -3616,7 +3612,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.AI.BridgeConfig.MaxConcurrency,
|
||||
Default: "0",
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "maxConcurrency",
|
||||
YAML: "max_concurrency",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Rate Limit",
|
||||
@@ -3626,7 +3622,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.AI.BridgeConfig.RateLimit,
|
||||
Default: "0",
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "rateLimit",
|
||||
YAML: "rate_limit",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Structured Logging",
|
||||
@@ -3636,7 +3632,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.AI.BridgeConfig.StructuredLogging,
|
||||
Default: "false",
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "structuredLogging",
|
||||
YAML: "structured_logging",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Send Actor Headers",
|
||||
@@ -3658,7 +3654,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.AI.BridgeConfig.CircuitBreakerEnabled,
|
||||
Default: "false",
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "circuitBreakerEnabled",
|
||||
YAML: "circuit_breaker_enabled",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Circuit Breaker Failure Threshold",
|
||||
@@ -3674,7 +3670,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Default: "5",
|
||||
Hidden: true,
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "circuitBreakerFailureThreshold",
|
||||
YAML: "circuit_breaker_failure_threshold",
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Circuit Breaker Interval",
|
||||
@@ -3685,7 +3681,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Default: "10s",
|
||||
Hidden: true,
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "circuitBreakerInterval",
|
||||
YAML: "circuit_breaker_interval",
|
||||
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
|
||||
},
|
||||
{
|
||||
@@ -3697,7 +3693,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Default: "30s",
|
||||
Hidden: true,
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "circuitBreakerTimeout",
|
||||
YAML: "circuit_breaker_timeout",
|
||||
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
|
||||
},
|
||||
{
|
||||
@@ -3714,7 +3710,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Default: "3",
|
||||
Hidden: true,
|
||||
Group: &deploymentGroupAIBridge,
|
||||
YAML: "circuitBreakerMaxRequests",
|
||||
YAML: "circuit_breaker_max_requests",
|
||||
},
|
||||
|
||||
// AI Bridge Proxy Options
|
||||
|
||||
@@ -12,8 +12,9 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
LicenseExpiryClaim = "license_expires"
|
||||
LicenseTelemetryRequiredErrorText = "License requires telemetry but telemetry is disabled"
|
||||
LicenseExpiryClaim = "license_expires"
|
||||
LicenseTelemetryRequiredErrorText = "License requires telemetry but telemetry is disabled"
|
||||
LicenseManagedAgentLimitExceededWarningText = "You have built more workspaces with managed agents than your license allows."
|
||||
)
|
||||
|
||||
type AddLicenseRequest struct {
|
||||
|
||||
@@ -115,6 +115,25 @@ specified in your template in the `disable_params` search params list
|
||||
[](https://YOUR_ACCESS_URL/templates/YOUR_TEMPLATE/workspace?disable_params=first_parameter,second_parameter)
|
||||
```
|
||||
|
||||
### Security: consent dialog for automatic creation
|
||||
|
||||
When using `mode=auto` with prefilled `param.*` values, Coder displays a
|
||||
security consent dialog before creating the workspace. This protects users
|
||||
from malicious links that could provision workspaces with untrusted
|
||||
configurations, such as dotfiles or startup scripts from unknown sources.
|
||||
|
||||
The dialog shows:
|
||||
|
||||
- A warning that a workspace is about to be created automatically from a link
|
||||
- All prefilled `param.*` values from the URL
|
||||
- **Confirm and Create** and **Cancel** buttons
|
||||
|
||||
The workspace is only created if the user explicitly clicks **Confirm and
|
||||
Create**. Clicking **Cancel** falls back to the standard creation form where
|
||||
all parameters can be reviewed manually.
|
||||
|
||||

|
||||
|
||||
### Example: Kubernetes
|
||||
|
||||
For a full example of the Open in Coder flow in Kubernetes, check out
|
||||
|
||||
@@ -1,133 +0,0 @@
|
||||
# Client Configuration
|
||||
|
||||
Once AI Bridge is setup on your deployment, the AI coding tools used by your users will need to be configured to route requests via AI Bridge.
|
||||
|
||||
## Base URLs
|
||||
|
||||
Most AI coding tools allow the "base URL" to be customized. In other words, when a request is made to OpenAI's API from your coding tool, the API endpoint such as [`/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat) will be appended to the configured base. Therefore, instead of the default base URL of `https://api.openai.com/v1`, you'll need to set it to `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
|
||||
The exact configuration method varies by client — some use environment variables, others use configuration files or UI settings:
|
||||
|
||||
- **OpenAI-compatible clients**: Set the base URL (commonly via the `OPENAI_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/openai/v1`
|
||||
- **Anthropic-compatible clients**: Set the base URL (commonly via the `ANTHROPIC_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/anthropic`
|
||||
|
||||
Replace `coder.example.com` with your actual Coder deployment URL.
|
||||
|
||||
## Authentication
|
||||
|
||||
Instead of distributing provider-specific API keys (OpenAI/Anthropic keys) to users, they authenticate to AI Bridge using their **Coder session token** or **API key**:
|
||||
|
||||
- **OpenAI clients**: Users set `OPENAI_API_KEY` to their Coder session token or API key
|
||||
- **Anthropic clients**: Users set `ANTHROPIC_API_KEY` to their Coder session token or API key
|
||||
|
||||
Again, the exact environment variable or setting naming may differ from tool to tool; consult your tool's documentation.
|
||||
|
||||
## Configuring In-Workspace Tools
|
||||
|
||||
AI coding tools running inside a Coder workspace, such as IDE extensions, can be configured to use AI Bridge.
|
||||
|
||||
While users can manually configure these tools with a long-lived API key, template admins can provide a more seamless experience by pre-configuring them. Admins can automatically inject the user's session token with `data.coder_workspace_owner.me.session_token` and the AI Bridge base URL into the workspace environment.
|
||||
|
||||
In this example, Claude code respects these environment variables and will route all requests via AI Bridge.
|
||||
|
||||
This is the fastest way to bring existing agents like Roo Code, Cursor, or Claude Code into compliance without adopting Coder Tasks.
|
||||
|
||||
```hcl
|
||||
data "coder_workspace_owner" "me" {}
|
||||
|
||||
data "coder_workspace" "me" {}
|
||||
|
||||
resource "coder_agent" "dev" {
|
||||
arch = "amd64"
|
||||
os = "linux"
|
||||
dir = local.repo_dir
|
||||
env = {
|
||||
ANTHROPIC_BASE_URL : "${data.coder_workspace.me.access_url}/api/v2/aibridge/anthropic",
|
||||
ANTHROPIC_AUTH_TOKEN : data.coder_workspace_owner.me.session_token
|
||||
}
|
||||
... # other agent configuration
|
||||
}
|
||||
```
|
||||
|
||||
### Using Coder Tasks
|
||||
|
||||
Agents like Claude Code can be configured to route through AI Bridge in any template by pre-configuring the agent with the session token. [Coder Tasks](../tasks.md) is particularly useful for this pattern, providing a framework for agents to complete background development operations autonomously. To route agents through AI Bridge in a Coder Tasks template, pre-configure it to install Claude Code and configure it with the session token:
|
||||
|
||||
```hcl
|
||||
data "coder_workspace_owner" "me" {}
|
||||
|
||||
data "coder_workspace" "me" {}
|
||||
|
||||
data "coder_task" "me" {}
|
||||
|
||||
resource "coder_agent" "dev" {
|
||||
arch = "amd64"
|
||||
os = "linux"
|
||||
dir = local.repo_dir
|
||||
env = {
|
||||
ANTHROPIC_BASE_URL : "${data.coder_workspace.me.access_url}/api/v2/aibridge/anthropic",
|
||||
ANTHROPIC_AUTH_TOKEN : data.coder_workspace_owner.me.session_token
|
||||
}
|
||||
... # other agent configuration
|
||||
}
|
||||
|
||||
# See https://registry.coder.com/modules/coder/claude-code for more information
|
||||
module "claude-code" {
|
||||
count = data.coder_task.me.enabled ? data.coder_workspace.me.start_count : 0
|
||||
source = "dev.registry.coder.com/coder/claude-code/coder"
|
||||
version = ">= 4.0.0"
|
||||
agent_id = coder_agent.dev.id
|
||||
workdir = "/home/coder/project"
|
||||
claude_api_key = data.coder_workspace_owner.me.session_token # Use the Coder session token to authenticate with AI Bridge
|
||||
ai_prompt = data.coder_task.me.prompt
|
||||
... # other claude-code configuration
|
||||
}
|
||||
|
||||
# The coder_ai_task resource associates the task to the app.
|
||||
resource "coder_ai_task" "task" {
|
||||
count = data.coder_task.me.enabled ? data.coder_workspace.me.start_count : 0
|
||||
app_id = module.claude-code[0].task_app_id
|
||||
}
|
||||
```
|
||||
|
||||
## External and Desktop Clients
|
||||
|
||||
You can also configure AI tools running outside of a Coder workspace, such as local IDE extensions or desktop applications, to connect to AI Bridge.
|
||||
|
||||
The configuration is the same: point the tool to the AI Bridge [base URL](#base-urls) and use a Coder API key for authentication.
|
||||
|
||||
Users can generate a long-lived API key from the Coder UI or CLI. Follow the instructions at [Sessions and API tokens](../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself) to create one.
|
||||
|
||||
## Compatibility
|
||||
|
||||
The table below shows tested AI clients and their compatibility with AI Bridge. Click each client name for vendor-specific configuration instructions. Report issues or share compatibility updates in the [aibridge](https://github.com/coder/aibridge) issue tracker.
|
||||
|
||||
| Client | OpenAI support | Anthropic support | Notes |
|
||||
|-------------------------------------------------------------------------------------------------------------------------------------|----------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Claude Code](https://docs.claude.com/en/docs/claude-code/settings#environment-variables) | - | ✅ | Works out of the box and can be preconfigured in templates. |
|
||||
| Claude Code (VS Code) | - | ✅ | May require signing in once; afterwards respects workspace environment variables. |
|
||||
| Cursor | ❌ | ❌ | Support dropped for `v1/chat/completions` endpoints; `v1/responses` support is in progress [#16](https://github.com/coder/aibridge/issues/16) |
|
||||
| [Roo Code](https://docs.roocode.com/features/api-configuration-profiles#creating-and-managing-profiles) | ✅ | ✅ | Use the **OpenAI Compatible** provider with the legacy format to avoid `/v1/responses`. |
|
||||
| [Codex CLI](https://github.com/openai/codex/blob/main/docs/config.md#model_providers) | ⚠️ | N/A | • Use v0.58.0 (`npm install -g @openai/codex@0.58.0`). Newer versions have a [bug](https://github.com/openai/codex/issues/8107) breaking the request payload. <br/>• `gpt-5-codex` support is [in progress](https://github.com/coder/aibridge/issues/16). |
|
||||
| [GitHub Copilot (VS Code)](https://code.visualstudio.com/docs/copilot/customization/language-models#_add-an-openaicompatible-model) | ✅ | ❌ | Requires the pre-release extension. Anthropic endpoints are not supported. |
|
||||
| [Goose](https://block.github.io/goose/docs/getting-started/providers/#available-providers) | ❓ | ❓ | |
|
||||
| [Goose Desktop](https://block.github.io/goose/docs/getting-started/providers/#available-providers) | ❓ | ✅ | |
|
||||
| WindSurf | ❌ | ❌ | No option to override the base URL. |
|
||||
| Sourcegraph Amp | ❌ | ❌ | No option to override the base URL. |
|
||||
| Kiro | ❌ | ❌ | No option to override the base URL. |
|
||||
| [Copilot CLI](https://github.com/github/copilot-cli/issues/104) | ❌ | ❌ | No support for custom base URLs and uses a `GITHUB_TOKEN` for authentication. |
|
||||
| [Kilo Code](https://kilocode.ai/docs/features/api-configuration-profiles#creating-and-managing-profiles) | ✅ | ✅ | Similar to Roo Code. |
|
||||
| Gemini CLI | ❌ | ❌ | Not supported yet. |
|
||||
| [Amazon Q CLI](https://aws.amazon.com/q/) | ❌ | ❌ | Limited to Amazon Q subscriptions; no custom endpoint support. |
|
||||
|
||||
Legend: ✅ works, ⚠️ limited support, ❌ not supported, ❓ not yet verified, — not applicable.
|
||||
|
||||
### Compatibility Overview
|
||||
|
||||
Most AI coding assistants can use AI Bridge, provided they support custom base URLs. Client-specific requirements vary:
|
||||
|
||||
- Some clients require specific URL formats (for example, removing the `/v1` suffix).
|
||||
- Some clients proxy requests through their own servers, which limits compatibility.
|
||||
- Some clients do not support custom base URLs.
|
||||
|
||||
See the table in the [compatibility](#compatibility) section above for the combinations we have verified and any known issues.
|
||||
@@ -0,0 +1,55 @@
|
||||
# Claude Code
|
||||
|
||||
## Configuration
|
||||
|
||||
Claude Code can be configured using environment variables.
|
||||
|
||||
* **Base URL**: `ANTHROPIC_BASE_URL` should point to `https://coder.example.com/api/v2/aibridge/anthropic`
|
||||
* **API Key**: `ANTHROPIC_API_KEY` should be your [Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself).
|
||||
|
||||
### Pre-configuring in Templates
|
||||
|
||||
Template admins can pre-configure Claude Code for a seamless experience. Admins can automatically inject the user's Coder session token and the AI Bridge base URL into the workspace environment.
|
||||
|
||||
```hcl
|
||||
module "claude-code" {
|
||||
source = "registry.coder.com/coder/claude-code/coder"
|
||||
version = "4.7.3"
|
||||
agent_id = coder_agent.main.id
|
||||
workdir = "/path/to/project" # Set to your project directory
|
||||
enable_aibridge = true
|
||||
}
|
||||
```
|
||||
|
||||
### Coder Tasks
|
||||
|
||||
[Coder Tasks](../../tasks.md) provides a framework for agents to complete background development operations autonomously. Claude Code can be configured in your Tasks automatically:
|
||||
|
||||
```hcl
|
||||
resource "coder_ai_task" "task" {
|
||||
count = data.coder_workspace.me.start_count
|
||||
app_id = module.claude-code.task_app_id
|
||||
}
|
||||
|
||||
data "coder_task" "me" {}
|
||||
|
||||
module "claude-code" {
|
||||
source = "registry.coder.com/coder/claude-code/coder"
|
||||
version = "4.7.3"
|
||||
agent_id = coder_agent.main.id
|
||||
workdir = "/path/to/project" # Set to your project directory
|
||||
ai_prompt = data.coder_task.me.prompt
|
||||
|
||||
# Route through AI Bridge (Premium feature)
|
||||
enable_aibridge = true
|
||||
}
|
||||
```
|
||||
|
||||
## VS Code Extension
|
||||
|
||||
The Claude Code VS Code extension is also supported.
|
||||
|
||||
1. If pre-configured in the workspace environment variables (as shown above), it typically respects them.
|
||||
2. You may need to sign in once; afterwards, it respects the workspace environment variables.
|
||||
|
||||
**References:** [Claude Code Settings](https://docs.claude.com/en/docs/claude-code/settings#environment-variables)
|
||||
@@ -0,0 +1,36 @@
|
||||
# Cline
|
||||
|
||||
Cline supports both OpenAI and Anthropic models and can be configured to use AI Bridge by setting providers.
|
||||
|
||||
## Configuration
|
||||
|
||||
To configure Cline to use AI Bridge, follow these steps:
|
||||

|
||||
|
||||
<div class="tabs">
|
||||
|
||||
### OpenAI Compatible
|
||||
|
||||
1. Open Cline in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **API Provider**: Select **OpenAI Compatible**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
1. **OpenAI Compatible API Key**: Enter your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
1. **Model ID** (Optional): Enter the model you wish to use (e.g., `gpt-5.2-codex`).
|
||||
|
||||

|
||||
|
||||
### Anthropic
|
||||
|
||||
1. Open Cline in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **API Provider**: Select **Anthropic**.
|
||||
1. **Anthropic API Key**: Enter your **Coder Session Token**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/anthropic` after checking **_Use custom base URL_**.
|
||||
1. **Model ID** (Optional): Select your desired Claude model.
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
**References:** [Cline Configuration](https://github.com/cline/cline)
|
||||
@@ -0,0 +1,50 @@
|
||||
# Codex CLI
|
||||
|
||||
Codex CLI can be configured to use AI Bridge by setting up a custom model provider.
|
||||
|
||||
## Configuration
|
||||
|
||||
> [!NOTE]
|
||||
> When running Codex CLI inside a Coder workspace, use the configuration below to route requests through AI Bridge.
|
||||
|
||||
To configure Codex CLI to use AI Bridge, set the following configuration options in your Codex configuration file (e.g., `~/.codex/config.toml`):
|
||||
|
||||
```toml
|
||||
[model_providers.aibridge]
|
||||
name = "AI Bridge"
|
||||
base_url = "${data.coder_workspace.me.access_url}/api/v2/aibridge/openai/v1"
|
||||
env_key = "OPENAI_API_KEY"
|
||||
wire_api = "responses"
|
||||
|
||||
[profiles.aibridge]
|
||||
model_provider = "aibridge"
|
||||
model = "gpt-5.2-codex"
|
||||
```
|
||||
|
||||
Run Codex with the `aibridge` profile:
|
||||
|
||||
```bash
|
||||
codex --profile aibridge
|
||||
```
|
||||
|
||||
If configuring within a Coder workspace, you can also use the [Codex CLI](https://registry.coder.com/modules/coder-labs/codex) module and set the following variables:
|
||||
|
||||
```tf
|
||||
module "codex" {
|
||||
source = "registry.coder.com/coder-labs/codex/coder"
|
||||
version = "~> 4.1"
|
||||
agent_id = coder_agent.main.id
|
||||
workdir = "/path/to/project" # Set to your project directory
|
||||
enable_aibridge = true
|
||||
}
|
||||
```
|
||||
|
||||
## Authentication
|
||||
|
||||
To authenticate with AI Bridge, get your **[Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)** and set it in your environment:
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY="<your-coder-session-token>"
|
||||
```
|
||||
|
||||
**References:** [Codex CLI Configuration](https://developers.openai.com/codex/config-advanced)
|
||||
@@ -0,0 +1,35 @@
|
||||
# Factory
|
||||
|
||||
Factory's Droid agent can be configured to use AI Bridge by setting up custom models for OpenAI and Anthropic.
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Open `~/.factory/settings.json` (create it if it does not exist).
|
||||
2. Add a `customModels` entry for each provider you want to use with AI Bridge.
|
||||
3. Replace `coder.example.com` with your Coder deployment URL.
|
||||
4. Use a **[Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)** for `apiKey`.
|
||||
|
||||
```json
|
||||
{
|
||||
"customModels": [
|
||||
{
|
||||
"model": "claude-4-5-opus",
|
||||
"displayName": "Claude (Coder AI Bridge)",
|
||||
"baseUrl": "https://coder.example.com/api/v2/aibridge/anthropic",
|
||||
"apiKey": "<your-coder-session-token>",
|
||||
"provider": "anthropic",
|
||||
"maxOutputTokens": 8192
|
||||
},
|
||||
{
|
||||
"model": "gpt-5.2-codex",
|
||||
"displayName": "GPT (Coder AI Bridge)",
|
||||
"baseUrl": "https://coder.example.com/api/v2/aibridge/openai/v1",
|
||||
"apiKey": "<your-coder-session-token>",
|
||||
"provider": "openai",
|
||||
"maxOutputTokens": 16384
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**References:** [Factory BYOK OpenAI & Anthropic](https://docs.factory.ai/cli/byok/openai-anthropic)
|
||||
@@ -0,0 +1,99 @@
|
||||
# Client Configuration
|
||||
|
||||
Once AI Bridge is set up on your deployment, the AI coding tools used by your users will need to be configured to route requests via AI Bridge.
|
||||
|
||||
## Base URLs
|
||||
|
||||
Most AI coding tools allow the "base URL" to be customized. In other words, when a request is made to OpenAI's API from your coding tool, the API endpoint such as [`/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat) will be appended to the configured base. Therefore, instead of the default base URL of `https://api.openai.com/v1`, you'll need to set it to `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
|
||||
The exact configuration method varies by client — some use environment variables, others use configuration files or UI settings:
|
||||
|
||||
- **OpenAI-compatible clients**: Set the base URL (commonly via the `OPENAI_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/openai/v1`
|
||||
- **Anthropic-compatible clients**: Set the base URL (commonly via the `ANTHROPIC_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/anthropic`
|
||||
|
||||
Replace `coder.example.com` with your actual Coder deployment URL.
|
||||
|
||||
## Authentication
|
||||
|
||||
Instead of distributing provider-specific API keys (OpenAI/Anthropic keys) to users, they authenticate to AI Bridge using their **Coder session token** or **API key**:
|
||||
|
||||
- **OpenAI clients**: Users set `OPENAI_API_KEY` to their Coder session token or API key
|
||||
- **Anthropic clients**: Users set `ANTHROPIC_API_KEY` to their Coder session token or API key
|
||||
|
||||
> [!NOTE]
|
||||
> Only Coder-issued tokens can authenticate users against AI Bridge.
|
||||
> AI Bridge will use provider-specific API keys to [authenticate against upstream AI services](https://coder.com/docs/ai-coder/ai-bridge/setup#configure-providers).
|
||||
|
||||
Again, the exact environment variable or setting naming may differ from tool to tool. See a list of [supported clients](#all-supported-clients) below and consult your tool's documentation for details.
|
||||
|
||||
### Retrieving your session token
|
||||
|
||||
[Generate a long-lived API token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself) via the Coder dashboard and use it to configure your AI coding tool:
|
||||
|
||||
```sh
|
||||
export ANTHROPIC_API_KEY="your-coder-session-token"
|
||||
export ANTHROPIC_BASE_URL="https://coder.example.com/api/v2/aibridge/anthropic"
|
||||
```
|
||||
|
||||
## Compatibility
|
||||
|
||||
The table below shows tested AI clients and their compatibility with AI Bridge.
|
||||
|
||||
| Client | OpenAI | Anthropic | Notes |
|
||||
|----------------------------------|--------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Claude Code](./claude-code.md) | - | ✅ | |
|
||||
| [Codex CLI](./codex.md) | ✅ | - | |
|
||||
| [OpenCode](./opencode.md) | ✅ | ✅ | |
|
||||
| [Factory](./factory.md) | ✅ | ✅ | |
|
||||
| [Cline](./cline.md) | ✅ | ✅ | |
|
||||
| [Kilo Code](./kilo-code.md) | ✅ | ✅ | |
|
||||
| [Roo Code](./roo-code.md) | ✅ | ✅ | |
|
||||
| [VS Code](./vscode.md) | ✅ | ❌ | Only supports Custom Base URL for OpenAI. |
|
||||
| [JetBrains IDEs](./jetbrains.md) | ✅ | ❌ | Works in Chat mode via "Bring Your Own Key". |
|
||||
| [Zed](./zed.md) | ✅ | ✅ | |
|
||||
| WindSurf | ❌ | ❌ | No option to override base URL. |
|
||||
| Cursor | ❌ | ❌ | Override for OpenAI broken ([upstream issue](https://forum.cursor.com/t/requests-are-sent-to-incorrect-endpoint-when-using-base-url-override/144894)). |
|
||||
| Sourcegraph Amp | ❌ | ❌ | No option to override base URL. |
|
||||
| Kiro | ❌ | ❌ | No option to override base URL. |
|
||||
| Gemini CLI | ❌ | ❌ | No Gemini API support. Upvote [this issue](https://github.com/coder/aibridge/issues/27). |
|
||||
| Antigravity | ❌ | ❌ | No option to override base URL. |
|
||||
|
|
||||
|
||||
*Legend: ✅ supported, ❌ not supported, - not applicable.*
|
||||
|
||||
## Configuring In-Workspace Tools
|
||||
|
||||
AI coding tools running inside a Coder workspace, such as IDE extensions, can be configured to use AI Bridge.
|
||||
|
||||
While users can manually configure these tools with a long-lived API key, template admins can provide a more seamless experience by pre-configuring them. Admins can automatically inject the user's session token with `data.coder_workspace_owner.me.session_token` and the AI Bridge base URL into the workspace environment.
|
||||
|
||||
In this example, Claude Code respects these environment variables and will route all requests via AI Bridge.
|
||||
|
||||
```hcl
|
||||
data "coder_workspace_owner" "me" {}
|
||||
|
||||
data "coder_workspace" "me" {}
|
||||
|
||||
resource "coder_agent" "dev" {
|
||||
arch = "amd64"
|
||||
os = "linux"
|
||||
dir = local.repo_dir
|
||||
env = {
|
||||
ANTHROPIC_BASE_URL : "${data.coder_workspace.me.access_url}/api/v2/aibridge/anthropic",
|
||||
ANTHROPIC_AUTH_TOKEN : data.coder_workspace_owner.me.session_token
|
||||
}
|
||||
... # other agent configuration
|
||||
}
|
||||
```
|
||||
|
||||
## External and Desktop Clients
|
||||
|
||||
You can also configure AI tools running outside of a Coder workspace, such as local IDE extensions or desktop applications, to connect to AI Bridge.
|
||||
|
||||
The configuration is the same: point the tool to the AI Bridge [base URL](#base-urls) and use a Coder API key for authentication.
|
||||
|
||||
Users can generate a long-lived API key from the Coder UI or CLI. Follow the instructions at [Sessions and API tokens](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself) to create one.
|
||||
|
||||
## All Supported Clients
|
||||
|
||||
<children></children>
|
||||
@@ -0,0 +1,35 @@
|
||||
# JetBrains IDEs
|
||||
|
||||
JetBrains IDE (IntelliJ IDEA, PyCharm, WebStorm, etc.) support AI Bridge via the ["Bring Your Own Key" (BYOK)](https://www.jetbrains.com/help/ai-assistant/use-custom-models.html#provide-your-own-api-key) feature.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* [**JetBrains AI Assistant**](https://www.jetbrains.com/help/ai-assistant/installation-guide-ai-assistant.html): Installed and enabled.
|
||||
* **Authentication**: Your **[Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
|
||||
## Configuration
|
||||
|
||||
1. **Open Settings**: Go to **Settings** > **Tools** > **AI Assistant** > **Models & API Keys**.
|
||||
1. **Configure Provider**: Go to **Third-party AI providers**.
|
||||
1. **Choose Provider**: Choose **OpenAI-compatible**.
|
||||
1. **URL**: `https://coder.example.com/api/v2/aibridge/openai/v1`
|
||||
1. **API Key**: Paste your **[Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
1. **Apply**: Click **Apply** and **OK**.
|
||||
|
||||

|
||||
|
||||
## Using the AI Assistant
|
||||
|
||||
1. Go back to **AI Chat** on the left side bar and choose **Chat**.
|
||||
1. In the Model dropdown, select the desired model (e.g., `gpt-5.2`).
|
||||
|
||||

|
||||
|
||||
You can now use the AI Assistant chat with the configured provider.
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> * JetBrains AI Assistant currently only supports OpenAI-compatible endpoints. There is an open [issue](https://youtrack.jetbrains.com/issue/LLM-22740) tracking support for Anthropic.
|
||||
> * JetBrains AI Assistant may not support all models that support OpenAI's `/chat/completions` endpoint in Chat mode.
|
||||
|
||||
**References:** [Use custom models with JetBrains AI Assistant](https://www.jetbrains.com/help/ai-assistant/use-custom-models.html#provide-your-own-api-key)
|
||||
@@ -0,0 +1,33 @@
|
||||
# Kilo Code
|
||||
|
||||
Kilo Code allows you to configure providers via the UI and can be set up to use AI Bridge.
|
||||
|
||||
## Configuration
|
||||
|
||||
<div class="tabs">
|
||||
|
||||
### OpenAI Compatible
|
||||
|
||||
1. Open Kilo Code in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **Provider**: Select **OpenAI**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
1. **API Key**: Enter your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
1. **Model ID**: Enter the model you wish to use (e.g., `gpt-5.2-codex`).
|
||||
|
||||

|
||||
|
||||
### Anthropic
|
||||
|
||||
1. Open Kilo Code in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **Provider**: Select **Anthropic**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/anthropic`.
|
||||
1. **API Key**: Enter your **Coder Session Token**.
|
||||
1. **Model ID**: Select your desired Claude model.
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
**References:** [Kilo Code Configuration](https://kilocode.ai/docs/ai-providers/openai-compatible)
|
||||
@@ -0,0 +1,44 @@
|
||||
# OpenCode
|
||||
|
||||
OpenCode supports both OpenAI and Anthropic models and can be configured to use AI Bridge by setting custom base URLs for each provider.
|
||||
|
||||
## Configuration
|
||||
|
||||
You can configure OpenCode to connect to AI Bridge by setting the following configuration options in your OpenCode configuration file (e.g., `~/.config/opencode/opencode.json`):
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "https://opencode.ai/config.json",
|
||||
"provider": {
|
||||
"anthropic": {
|
||||
"options": {
|
||||
"baseURL": "https://coder.example.com/api/v2/aibridge/anthropic/v1"
|
||||
}
|
||||
},
|
||||
"openai": {
|
||||
"options": {
|
||||
"baseURL": "https://coder.example.com/api/v2/aibridge/openai/v1"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Authentication
|
||||
|
||||
To authenticate with AI Bridge, get your **[Coder session token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)** and replace `<your-coder-session-token>` in `~/.local/share/opencode/auth.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"anthropic": {
|
||||
"type": "api",
|
||||
"key": "<your-coder-session-token>"
|
||||
},
|
||||
"openai": {
|
||||
"type": "api",
|
||||
"key": "<your-coder-session-token>"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**References:** [OpenCode Documentation](https://opencode.ai/docs/providers/#config)
|
||||
@@ -0,0 +1,39 @@
|
||||
# Roo Code
|
||||
|
||||
Roo Code allows you to configure providers via the UI and can be set up to use AI Bridge.
|
||||
|
||||
## Configuration
|
||||
|
||||
Roo Code allows you to configure providers via the UI.
|
||||
|
||||
<div class="tabs">
|
||||
|
||||
### OpenAI Compatible
|
||||
|
||||
1. Open Roo Code in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **Provider**: Select **OpenAI**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
1. **API Key**: Enter your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
1. **Model ID**: Enter the model you wish to use (e.g., `gpt-5.2-codex`).
|
||||

|
||||
|
||||
### Anthropic
|
||||
|
||||
1. Open Roo Code in VS Code.
|
||||
1. Go to **Settings**.
|
||||
1. **Provider**: Select **Anthropic**.
|
||||
1. **Base URL**: Enter `https://coder.example.com/api/v2/aibridge/anthropic`.
|
||||
1. **API Key**: Enter your **Coder Session Token**.
|
||||
1. **Model ID**: Select your desired Claude model.
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
### Notes
|
||||
|
||||
* If you encounter issues with the **OpenAI** provider type, use **OpenAI Compatible** to ensure correct endpoint routing.
|
||||
* Ensure your Coder deployment URL is reachable from your VS Code environment.
|
||||
|
||||
**References:** [Roo Code Configuration Profiles](https://docs.roocode.com/features/api-configuration-profiles#creating-and-managing-profiles)
|
||||
@@ -0,0 +1,50 @@
|
||||
# VS Code
|
||||
|
||||
VS Code's native chat can be configured to use AI Bridge with the GitHub Copilot Chat extension's custom language model support.
|
||||
|
||||
## Configuration
|
||||
|
||||
> [!IMPORTANT]
|
||||
> You need the **Pre-release** version of the [GitHub Copilot Chat extension](https://marketplace.visualstudio.com/items?itemName=GitHub.copilot-chat) and [VS Code Insiders](https://code.visualstudio.com/insiders/).
|
||||
|
||||
1. Open command palette (`Ctrl+Shift+P` or `Cmd+Shift+P` on Mac) and search for _Chat: Open Language Models (JSON)_.
|
||||
1. Paste the following JSON configuration, replacing `<your-coder-session-token>` with your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"name": "Coder",
|
||||
"vendor": "customoai",
|
||||
      "apiKey": "<your-coder-session-token>",
|
||||
"models": [
|
||||
{
|
||||
"name": "GPT 5.2",
|
||||
"url": "https://coder.example.com/api/v2/aibridge/openai/v1/chat/completions",
|
||||
"toolCalling": true,
|
||||
"vision": true,
|
||||
"thinking": true,
|
||||
"maxInputTokens": 272000,
|
||||
"maxOutputTokens": 128000,
|
||||
"id": "gpt-5.2"
|
||||
},
|
||||
{
|
||||
"name": "GPT 5.2 Codex",
|
||||
"url": "https://coder.example.com/api/v2/aibridge/openai/v1/responses",
|
||||
"toolCalling": true,
|
||||
"vision": true,
|
||||
"thinking": true,
|
||||
"maxInputTokens": 272000,
|
||||
"maxOutputTokens": 128000,
|
||||
"id": "gpt-5.2-codex"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
_Replace `coder.example.com` with your Coder deployment URL._
|
||||
|
||||
> [!NOTE]
|
||||
> The setting names may change as the feature moves from pre-release to stable. Refer to the official documentation for the latest setting keys.
|
||||
|
||||
**References:** [GitHub Copilot - Bring your own language model](https://code.visualstudio.com/docs/copilot/customization/language-models#_add-an-openaicompatible-model)
|
||||
@@ -0,0 +1,63 @@
|
||||
# Zed
|
||||
|
||||
Zed IDE supports AI Bridge via its `language_models` configuration in `settings.json`.
|
||||
|
||||
## Configuration
|
||||
|
||||
To configure Zed to use AI Bridge, you need to edit your `settings.json` file. You can access this by pressing `Cmd/Ctrl + ,` or opening the command palette and searching for "Open Settings".
|
||||
|
||||
You can configure both Anthropic and OpenAI providers to point to AI Bridge.
|
||||
|
||||
```json
|
||||
{
|
||||
"language_models": {
|
||||
"anthropic": {
|
||||
"api_url": "https://coder.example.com/api/v2/aibridge/anthropic",
|
||||
},
|
||||
"openai": {
|
||||
"api_url": "https://coder.example.com/api/v2/aibridge/openai/v1",
|
||||
},
|
||||
},
|
||||
// optional settings to set favorite models for the AI
|
||||
"agent": {
|
||||
"favorite_models": [
|
||||
{
|
||||
"provider": "anthropic",
|
||||
"model": "claude-sonnet-4-5-thinking-latest"
|
||||
},
|
||||
{
|
||||
"provider": "openai",
|
||||
"model": "gpt-5.2-codex"
|
||||
}
|
||||
],
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
*Replace `coder.example.com` with your Coder deployment URL.*
|
||||
|
||||
> [!NOTE]
|
||||
> These settings and environment variables need to be configured from the client side. Zed currently does not support reading these settings from remote configuration. See this [feature request](https://github.com/zed-industries/zed/discussions/47058) for more details.
|
||||
|
||||
## Authentication
|
||||
|
||||
Zed requires an API key for these providers. For AI Bridge, this key is your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)**.
|
||||
|
||||
You can set this in two ways:
|
||||
|
||||
<div class="tabs">
|
||||
|
||||
### Zed UI
|
||||
|
||||
1. Open the **Assistant Panel** (right sidebar).
|
||||
1. Click **Configuration** or the settings icon.
|
||||
1. Select your provider ("Anthropic" or "OpenAI").
|
||||
1. Paste your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)** for the API Key.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
1. Set `ANTHROPIC_API_KEY` and `OPENAI_API_KEY` to your **[Coder Session Token](../../../admin/users/sessions-tokens.md#generate-a-long-lived-api-token-on-behalf-of-yourself)** in the environment where you launch Zed.
|
||||
|
||||
</div>
|
||||
|
||||
**References:** [Configuring Zed - Language Models](https://zed.dev/docs/reference/all-settings#language-models)
|
||||
@@ -33,7 +33,9 @@ AI Bridge is best suited for organizations facing these centralized management a
|
||||
## Next steps
|
||||
|
||||
- [Set up AI Bridge](./setup.md) on your Coder deployment
|
||||
- [Configure AI clients](./client-config.md) to use AI Bridge
|
||||
- [Configure AI clients](./clients/index.md) to use AI Bridge
|
||||
- [Configure MCP servers](./mcp.md) for tool access
|
||||
- [Monitor usage and metrics](./monitoring.md) and [configure data retention](./setup.md#data-retention)
|
||||
- [Reference documentation](./reference.md)
|
||||
|
||||
<children></children>
|
||||
|
||||
@@ -20,11 +20,11 @@ Where relevant, both streaming and non-streaming requests are supported.
|
||||
#### Intercepted
|
||||
|
||||
- [`/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat/create)
|
||||
- [`/v1/responses`](https://platform.openai.com/docs/api-reference/responses/create)
|
||||
|
||||
#### Passthrough
|
||||
|
||||
- [`/v1/models(/*)`](https://platform.openai.com/docs/api-reference/models/list)
|
||||
- [`/v1/responses`](https://platform.openai.com/docs/api-reference/responses/create) _(Interception support coming in **Beta**)_
|
||||
|
||||
### Anthropic
|
||||
|
||||
|
||||
|
After Width: | Height: | Size: 154 KiB |
|
After Width: | Height: | Size: 97 KiB |
|
After Width: | Height: | Size: 92 KiB |
|
After Width: | Height: | Size: 816 KiB |
|
After Width: | Height: | Size: 767 KiB |
|
After Width: | Height: | Size: 172 KiB |
|
After Width: | Height: | Size: 155 KiB |
|
After Width: | Height: | Size: 186 KiB |
|
After Width: | Height: | Size: 171 KiB |
|
After Width: | Height: | Size: 52 KiB |
@@ -258,15 +258,6 @@ reference, and not all security requirements may apply to your business.
|
||||
- Both the control plane and workspaces set resource request/limits by
|
||||
default.
|
||||
|
||||
7. **All Kubernetes objects must define liveness and readiness probes**
|
||||
|
||||
- Control plane - The control plane Deployment has liveness and readiness
|
||||
probes
|
||||
[configured by default here](https://github.com/coder/coder/blob/f57ce97b5aadd825ddb9a9a129bb823a3725252b/helm/coder/templates/_coder.tpl#L98-L107).
|
||||
- Workspaces - the Kubernetes Deployment template does not configure
|
||||
liveness/readiness probes for the workspace, but this can be added to the
|
||||
Terraform template, and is supported.
|
||||
|
||||
## Load balancing considerations
|
||||
|
||||
### AWS
|
||||
|
||||
@@ -1025,7 +1025,7 @@
|
||||
"description": "AI Gateway for Enterprise Governance \u0026 Observability",
|
||||
"path": "./ai-coder/ai-bridge/index.md",
|
||||
"icon_path": "./images/icons/api.svg",
|
||||
"state": ["premium", "beta"],
|
||||
"state": ["premium"],
|
||||
"children": [
|
||||
{
|
||||
"title": "Setup",
|
||||
@@ -1035,7 +1035,59 @@
|
||||
{
|
||||
"title": "Client Configuration",
|
||||
"description": "How to configure your AI coding tools to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/client-config.md"
|
||||
"path": "./ai-coder/ai-bridge/clients/index.md",
|
||||
"children": [
|
||||
{
|
||||
"title": "Claude Code",
|
||||
"description": "Configure Claude Code to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/claude-code.md"
|
||||
},
|
||||
{
|
||||
"title": "Codex",
|
||||
"description": "Configure Codex to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/codex.md"
|
||||
},
|
||||
{
|
||||
"title": "OpenCode",
|
||||
"description": "Configure OpenCode to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/opencode.md"
|
||||
},
|
||||
{
|
||||
"title": "Factory",
|
||||
"description": "Configure Factory to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/factory.md"
|
||||
},
|
||||
{
|
||||
"title": "Cline",
|
||||
"description": "Configure Cline to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/cline.md"
|
||||
},
|
||||
{
|
||||
"title": "Kilo Code",
|
||||
"description": "Configure Kilo Code to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/kilo-code.md"
|
||||
},
|
||||
{
|
||||
"title": "Roo Code",
|
||||
"description": "Configure Roo Code to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/roo-code.md"
|
||||
},
|
||||
{
|
||||
"title": "VS Code",
|
||||
"description": "Configure VS Code to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/vscode.md"
|
||||
},
|
||||
{
|
||||
"title": "JetBrains",
|
||||
"description": "Configure JetBrains IDEs to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/jetbrains.md"
|
||||
},
|
||||
{
|
||||
"title": "Zed",
|
||||
"description": "Configure Zed to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/zed.md"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "MCP Tools Injection",
|
||||
|
||||
@@ -329,7 +329,6 @@ curl -X GET http://coder-server:8080/api/v2/entitlements \
|
||||
"enabled": true,
|
||||
"entitlement": "entitled",
|
||||
"limit": 0,
|
||||
"soft_limit": 0,
|
||||
"usage_period": {
|
||||
"end": "2019-08-24T14:15:22Z",
|
||||
"issued_at": "2019-08-24T14:15:22Z",
|
||||
@@ -341,7 +340,6 @@ curl -X GET http://coder-server:8080/api/v2/entitlements \
|
||||
"enabled": true,
|
||||
"entitlement": "entitled",
|
||||
"limit": 0,
|
||||
"soft_limit": 0,
|
||||
"usage_period": {
|
||||
"end": "2019-08-24T14:15:22Z",
|
||||
"issued_at": "2019-08-24T14:15:22Z",
|
||||
|
||||
@@ -3899,7 +3899,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o
|
||||
"enabled": true,
|
||||
"entitlement": "entitled",
|
||||
"limit": 0,
|
||||
"soft_limit": 0,
|
||||
"usage_period": {
|
||||
"end": "2019-08-24T14:15:22Z",
|
||||
"issued_at": "2019-08-24T14:15:22Z",
|
||||
@@ -3911,7 +3910,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o
|
||||
"enabled": true,
|
||||
"entitlement": "entitled",
|
||||
"limit": 0,
|
||||
"soft_limit": 0,
|
||||
"usage_period": {
|
||||
"end": "2019-08-24T14:15:22Z",
|
||||
"issued_at": "2019-08-24T14:15:22Z",
|
||||
@@ -4193,7 +4191,6 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
|
||||
"enabled": true,
|
||||
"entitlement": "entitled",
|
||||
"limit": 0,
|
||||
"soft_limit": 0,
|
||||
"usage_period": {
|
||||
"end": "2019-08-24T14:15:22Z",
|
||||
"issued_at": "2019-08-24T14:15:22Z",
|
||||
@@ -4204,13 +4201,12 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
|
||||
|
||||
### Properties
|
||||
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
|---------------|----------------------------------------------|----------|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `actual` | integer | false | | |
|
||||
| `enabled` | boolean | false | | |
|
||||
| `entitlement` | [codersdk.Entitlement](#codersdkentitlement) | false | | |
|
||||
| `limit` | integer | false | | |
|
||||
| `soft_limit` | integer | false | | Soft limit is the soft limit of the feature, and is only used for showing included limits in the dashboard. No license validation or warnings are generated from this value. |
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
|---------------|----------------------------------------------|----------|--------------|-------------|
|
||||
| `actual` | integer | false | | |
|
||||
| `enabled` | boolean | false | | |
|
||||
| `entitlement` | [codersdk.Entitlement](#codersdkentitlement) | false | | |
|
||||
| `limit` | integer | false | | |
|
||||
|`usage_period`|[codersdk.UsagePeriod](#codersdkusageperiod)|false||Usage period denotes that the usage is a counter that accumulates over this period (and most likely resets with the issuance of the next license).
|
||||
These dates are determined from the license that this entitlement comes from, see enterprise/coderd/license/license.go.
|
||||
Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
|
||||
@@ -1830,7 +1830,7 @@ Length of time to retain data such as interceptions and all related records (tok
|
||||
|-------------|----------------------------------------------|
|
||||
| Type | <code>int</code> |
|
||||
| Environment | <code>$CODER_AIBRIDGE_MAX_CONCURRENCY</code> |
|
||||
| YAML | <code>aibridge.maxConcurrency</code> |
|
||||
| YAML | <code>aibridge.max_concurrency</code> |
|
||||
| Default | <code>0</code> |
|
||||
|
||||
Maximum number of concurrent AI Bridge requests per replica. Set to 0 to disable (unlimited).
|
||||
@@ -1841,7 +1841,7 @@ Maximum number of concurrent AI Bridge requests per replica. Set to 0 to disable
|
||||
|-------------|-----------------------------------------|
|
||||
| Type | <code>int</code> |
|
||||
| Environment | <code>$CODER_AIBRIDGE_RATE_LIMIT</code> |
|
||||
| YAML | <code>aibridge.rateLimit</code> |
|
||||
| YAML | <code>aibridge.rate_limit</code> |
|
||||
| Default | <code>0</code> |
|
||||
|
||||
Maximum number of AI Bridge requests per second per replica. Set to 0 to disable (unlimited).
|
||||
@@ -1852,7 +1852,7 @@ Maximum number of AI Bridge requests per second per replica. Set to 0 to disable
|
||||
|-------------|-------------------------------------------------|
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_AIBRIDGE_STRUCTURED_LOGGING</code> |
|
||||
| YAML | <code>aibridge.structuredLogging</code> |
|
||||
| YAML | <code>aibridge.structured_logging</code> |
|
||||
| Default | <code>false</code> |
|
||||
|
||||
Emit structured logs for AI Bridge interception records. Use this for exporting these records to external SIEM or observability systems.
|
||||
@@ -1874,7 +1874,7 @@ Once enabled, extra headers will be added to upstream requests to identify the u
|
||||
|-------------|------------------------------------------------------|
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_AIBRIDGE_CIRCUIT_BREAKER_ENABLED</code> |
|
||||
| YAML | <code>aibridge.circuitBreakerEnabled</code> |
|
||||
| YAML | <code>aibridge.circuit_breaker_enabled</code> |
|
||||
| Default | <code>false</code> |
|
||||
|
||||
Enable the circuit breaker to protect against cascading failures from upstream AI provider rate limits (429, 503, 529 overloaded).
|
||||
|
||||
@@ -11,8 +11,8 @@ RUN cargo install jj-cli typos-cli watchexec-cli
|
||||
FROM ubuntu:jammy@sha256:c7eb020043d8fc2ae0793fb35a37bff1cf33f156d4d4b12ccc7f3ef8706c38b1 AS go
|
||||
|
||||
# Install Go manually, so that we can control the version
|
||||
ARG GO_VERSION=1.25.6
|
||||
ARG GO_CHECKSUM="f022b6aad78e362bcba9b0b94d09ad58c5a70c6ba3b7582905fababf5fe0181a"
|
||||
ARG GO_VERSION=1.25.7
|
||||
ARG GO_CHECKSUM="12e6d6a191091ae27dc31f6efc630e3a3b8ba409baf3573d955b196fdf086005"
|
||||
|
||||
# Boring Go is needed to build FIPS-compliant binaries.
|
||||
RUN apt-get update && \
|
||||
@@ -212,9 +212,9 @@ RUN sed -i 's|http://archive.ubuntu.com/ubuntu/|http://mirrors.edge.kernel.org/u
|
||||
# Configure FIPS-compliant policies
|
||||
update-crypto-policies --set FIPS
|
||||
|
||||
# NOTE: In scripts/Dockerfile.base we specifically install Terraform version 1.12.2.
|
||||
# NOTE: In scripts/Dockerfile.base we specifically install Terraform version 1.14.5.
|
||||
# Installing the same version here to match.
|
||||
RUN wget -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/1.14.1/terraform_1.14.1_linux_amd64.zip" && \
|
||||
RUN wget -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/1.14.5/terraform_1.14.5_linux_amd64.zip" && \
|
||||
unzip /tmp/terraform.zip -d /usr/local/bin && \
|
||||
rm -f /tmp/terraform.zip && \
|
||||
chmod +x /usr/local/bin/terraform && \
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -70,6 +71,8 @@ type Server struct {
|
||||
// caCert is the PEM-encoded CA certificate loaded during initialization.
|
||||
// This is served to clients who need to trust the proxy.
|
||||
caCert []byte
|
||||
// Metrics is the Prometheus metrics for the proxy. If nil, metrics are disabled.
|
||||
metrics *Metrics
|
||||
}
|
||||
|
||||
// requestContext holds metadata propagated through the proxy request/response chain.
|
||||
@@ -83,13 +86,13 @@ type requestContext struct {
|
||||
// CoderToken is the authentication token extracted from Proxy-Authorization.
|
||||
// Set in authMiddleware during the CONNECT handshake.
|
||||
CoderToken string
|
||||
// Provider is the aibridge provider name.
|
||||
// Set in authMiddleware during the CONNECT handshake.
|
||||
Provider string
|
||||
// RequestID is a unique identifier for this request.
|
||||
// Set in handleRequest for MITM'd requests.
|
||||
// Sent to aibridged via custom header for cross-service correlation.
|
||||
RequestID uuid.UUID
|
||||
// Provider is the aibridge provider name.
|
||||
// Set in handleRequest when handling MITM requests for allowlisted domains.
|
||||
Provider string
|
||||
}
|
||||
|
||||
// Options configures the AI Bridge Proxy server.
|
||||
@@ -126,6 +129,9 @@ type Options struct {
|
||||
// proxies with certificates not trusted by the system. If empty, the system
|
||||
// certificate pool is used.
|
||||
UpstreamProxyCA string
|
||||
// Metrics is the prometheus metrics instance for recording proxy metrics.
|
||||
// If nil, metrics will not be recorded.
|
||||
Metrics *Metrics
|
||||
}
|
||||
|
||||
func New(ctx context.Context, logger slog.Logger, opts Options) (*Server, error) {
|
||||
@@ -213,6 +219,20 @@ func New(ctx context.Context, logger slog.Logger, opts Options) (*Server, error)
|
||||
return nil, xerrors.Errorf("invalid upstream proxy URL %q: %w", opts.UpstreamProxy, err)
|
||||
}
|
||||
|
||||
// Extract and validate upstream proxy authentication if provided.
|
||||
// The credentials are parsed once at startup and reused for all
|
||||
// tunneled CONNECT requests through the upstream proxy.
|
||||
var connectReqHandler func(*http.Request)
|
||||
if upstreamURL.User != nil {
|
||||
proxyAuth := makeProxyAuthHeader(upstreamURL.User)
|
||||
if proxyAuth == "" {
|
||||
return nil, xerrors.Errorf("upstream proxy URL %q has invalid credentials: both username and password are empty", opts.UpstreamProxy)
|
||||
}
|
||||
connectReqHandler = func(req *http.Request) {
|
||||
req.Header.Set("Proxy-Authorization", proxyAuth)
|
||||
}
|
||||
}
|
||||
|
||||
// Set transport without Proxy to ensure MITM'd requests go directly to aibridge,
|
||||
// not through any upstream proxy.
|
||||
proxy.Tr = &http.Transport{
|
||||
@@ -244,7 +264,7 @@ func New(ctx context.Context, logger slog.Logger, opts Options) (*Server, error)
|
||||
// Configure tunneled CONNECT requests to go through upstream proxy.
|
||||
// This only affects non-allowlisted domains; allowlisted domains are
|
||||
// MITM'd and forwarded to aibridge.
|
||||
proxy.ConnectDial = proxy.NewConnectDialToProxy(opts.UpstreamProxy)
|
||||
proxy.ConnectDial = proxy.NewConnectDialToProxyWithHandler(opts.UpstreamProxy, connectReqHandler)
|
||||
}
|
||||
|
||||
srv := &Server{
|
||||
@@ -254,6 +274,7 @@ func New(ctx context.Context, logger slog.Logger, opts Options) (*Server, error)
|
||||
coderAccessURL: coderAccessURL,
|
||||
aibridgeProviderFromHost: aibridgeProviderFromHost,
|
||||
caCert: certPEM,
|
||||
metrics: opts.Metrics,
|
||||
}
|
||||
|
||||
// Reject CONNECT requests to non-standard ports.
|
||||
@@ -269,6 +290,11 @@ func New(ctx context.Context, logger slog.Logger, opts Options) (*Server, error)
|
||||
srv.authMiddleware,
|
||||
)
|
||||
|
||||
// Tunnel CONNECT requests for non-allowlisted domains directly to their destination.
|
||||
// goproxy calls handlers in registration order: this must come after the MITM handler
|
||||
// so it only handles requests that weren't matched by the allowlist.
|
||||
proxy.OnRequest().HandleConnectFunc(srv.tunneledMiddleware)
|
||||
|
||||
// Handle decrypted requests: route to aibridged for known AI providers, or tunnel to original destination.
|
||||
proxy.OnRequest().DoFunc(srv.handleRequest)
|
||||
// Handle responses from aibridged.
|
||||
@@ -320,6 +346,12 @@ func (s *Server) Close() error {
|
||||
return nil
|
||||
}
|
||||
s.logger.Info(s.ctx, "closing aibridgeproxyd server")
|
||||
|
||||
// Unregister metrics to clean up Prometheus registry.
|
||||
if s.metrics != nil {
|
||||
s.metrics.Unregister()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
return s.httpServer.Shutdown(ctx)
|
||||
@@ -448,14 +480,27 @@ func (s *Server) authMiddleware(host string, ctx *goproxy.ProxyCtx) (*goproxy.Co
|
||||
// incrementing int64 that resets on process restart and is not globally unique.
|
||||
connectSessionID := uuid.New()
|
||||
|
||||
proxyAuth := ctx.Req.Header.Get("Proxy-Authorization")
|
||||
coderToken := extractCoderTokenFromProxyAuth(proxyAuth)
|
||||
|
||||
logger := s.logger.With(
|
||||
slog.F("connect_id", connectSessionID),
|
||||
slog.F("connect_id", connectSessionID.String()),
|
||||
slog.F("host", host),
|
||||
)
|
||||
|
||||
// Determine the provider from the request hostname.
|
||||
provider := s.aibridgeProviderFromHost(ctx.Req.URL.Hostname())
|
||||
// This should never happen: startup validation ensures all allowlisted
|
||||
// domains have known aibridge provider mappings.
|
||||
if provider == "" {
|
||||
logger.Error(s.ctx, "rejecting CONNECT request with no provider mapping")
|
||||
return goproxy.RejectConnect, host
|
||||
}
|
||||
|
||||
logger = logger.With(
|
||||
slog.F("provider", provider),
|
||||
)
|
||||
|
||||
proxyAuth := ctx.Req.Header.Get("Proxy-Authorization")
|
||||
coderToken := extractCoderTokenFromProxyAuth(proxyAuth)
|
||||
|
||||
// Reject requests for both missing and invalid credentials
|
||||
if coderToken == "" {
|
||||
hasAuth := proxyAuth != ""
|
||||
@@ -474,13 +519,43 @@ func (s *Server) authMiddleware(host string, ctx *goproxy.ProxyCtx) (*goproxy.Co
|
||||
ctx.UserData = &requestContext{
|
||||
ConnectSessionID: connectSessionID,
|
||||
CoderToken: coderToken,
|
||||
Provider: provider,
|
||||
}
|
||||
|
||||
logger.Debug(s.ctx, "request CONNECT authenticated")
|
||||
|
||||
// Record successful MITM CONNECT session establishment.
|
||||
if s.metrics != nil {
|
||||
s.metrics.ConnectSessionsTotal.WithLabelValues(RequestTypeMITM).Inc()
|
||||
}
|
||||
|
||||
return goproxy.MitmConnect, host
|
||||
}
|
||||
|
||||
// makeProxyAuthHeader creates a Proxy-Authorization header value from URL user info.
|
||||
//
|
||||
// Valid formats:
|
||||
// - username:password -> Basic auth with both credentials
|
||||
// - username: or username -> Basic auth with username only (empty password)
|
||||
// - :password -> Basic auth with empty username (token-based proxies)
|
||||
//
|
||||
// Returns empty string when both username and password are empty.
|
||||
func makeProxyAuthHeader(userInfo *url.Userinfo) string {
|
||||
if userInfo == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
username := userInfo.Username()
|
||||
password, _ := userInfo.Password()
|
||||
|
||||
// Reject only when both username and password are empty (no credentials).
|
||||
if username == "" && password == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
return "Basic " + base64.StdEncoding.EncodeToString([]byte(userInfo.String()))
|
||||
}
|
||||
|
||||
// extractCoderTokenFromProxyAuth extracts the Coder token from the
|
||||
// Proxy-Authorization header. The token is expected to be in the password
|
||||
// field of basic auth: "Basic base64(username:token)".
|
||||
@@ -551,6 +626,20 @@ func defaultAIBridgeProvider(host string) string {
|
||||
}
|
||||
}
|
||||
|
||||
// tunneledMiddleware is a CONNECT middleware that handles tunneled (non-allowlisted)
|
||||
// connections. These connections are not MITM'd and are tunneled directly to their
|
||||
// destination. This middleware records metrics for tunneled CONNECT sessions.
|
||||
func (s *Server) tunneledMiddleware(host string, _ *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
|
||||
// Record tunneled CONNECT session establishment.
|
||||
if s.metrics != nil {
|
||||
s.metrics.ConnectSessionsTotal.WithLabelValues(RequestTypeTunneled).Inc()
|
||||
}
|
||||
|
||||
// Return OkConnect to allow the tunnel to be established.
|
||||
// goproxy will create a tunnel between the client and the destination.
|
||||
return goproxy.OkConnect, host
|
||||
}
|
||||
|
||||
// handleRequest intercepts HTTP requests after MITM decryption.
|
||||
// - Requests to known AI providers are rewritten to aibridged, with the Coder token
|
||||
// (from ctx.UserData, set during CONNECT) set in the X-Coder-Token header.
|
||||
@@ -566,11 +655,27 @@ func (s *Server) handleRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.
|
||||
slog.F("method", req.Method),
|
||||
slog.F("path", originalPath),
|
||||
)
|
||||
|
||||
resp := goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusProxyAuthRequired, "Proxy authentication required")
|
||||
resp.Header.Set("Proxy-Authenticate", `Basic realm="Coder AI Bridge Proxy"`)
|
||||
return req, resp
|
||||
}
|
||||
|
||||
if reqCtx.Provider == "" {
|
||||
// This should never happen: startup validation ensures all allowlisted
|
||||
// domains have known aibridge provider mappings.
|
||||
// The request is MITM'd (decrypted) but since there is no mapping,
|
||||
// there is no known route to aibridge.
|
||||
// Log error and forward to the original destination as a fallback.
|
||||
s.logger.Error(s.ctx, "decrypted request has no provider mapping, passing through",
|
||||
slog.F("connect_id", reqCtx.ConnectSessionID.String()),
|
||||
slog.F("host", req.Host),
|
||||
slog.F("method", req.Method),
|
||||
slog.F("path", originalPath),
|
||||
)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Generate a unique request ID for this request.
|
||||
// This ID is sent to aibridged for cross-service log correlation.
|
||||
reqCtx.RequestID = uuid.New()
|
||||
@@ -581,22 +686,9 @@ func (s *Server) handleRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.
|
||||
slog.F("host", req.Host),
|
||||
slog.F("method", req.Method),
|
||||
slog.F("path", originalPath),
|
||||
slog.F("provider", reqCtx.Provider),
|
||||
)
|
||||
|
||||
// Check if this request is for a supported AI provider.
|
||||
provider := s.aibridgeProviderFromHost(req.URL.Hostname())
|
||||
if provider == "" {
|
||||
// This should never happen: startup validation ensures all allowlisted
|
||||
// domains have known aibridge provider mappings.
|
||||
// The request is MITM'd (decrypted) but since there is no mapping,
|
||||
// there is no known route to aibridge.
|
||||
// Log error and forward to the original destination as a fallback.
|
||||
logger.Error(s.ctx, "decrypted request has no provider mapping, passing through")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
logger = logger.With(slog.F("provider", provider))
|
||||
|
||||
// Reject unauthenticated requests to AI providers.
|
||||
if reqCtx.CoderToken == "" {
|
||||
logger.Warn(s.ctx, "rejecting unauthenticated request to AI provider")
|
||||
@@ -604,16 +696,13 @@ func (s *Server) handleRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.
|
||||
return req, newProxyAuthRequiredResponse(req)
|
||||
}
|
||||
|
||||
// Store provider in context for response handler.
|
||||
reqCtx.Provider = provider
|
||||
|
||||
// Rewrite the request to point to aibridged.
|
||||
if s.coderAccessURL == nil || s.coderAccessURL.String() == "" {
|
||||
logger.Error(s.ctx, "coderAccessURL is not configured")
|
||||
return req, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusInternalServerError, "Proxy misconfigured")
|
||||
}
|
||||
|
||||
aiBridgeURL, err := url.JoinPath(s.coderAccessURL.String(), "api/v2/aibridge", provider, originalPath)
|
||||
aiBridgeURL, err := url.JoinPath(s.coderAccessURL.String(), "api/v2/aibridge", reqCtx.Provider, originalPath)
|
||||
if err != nil {
|
||||
logger.Error(s.ctx, "failed to build aibridged URL", slog.Error(err))
|
||||
return req, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusInternalServerError, "Failed to build AI Bridge URL")
|
||||
@@ -644,6 +733,12 @@ func (s *Server) handleRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.
|
||||
slog.F("aibridged_url", aiBridgeParsedURL.String()),
|
||||
)
|
||||
|
||||
// Record MITM request handling.
|
||||
if s.metrics != nil {
|
||||
s.metrics.MITMRequestsTotal.WithLabelValues(reqCtx.Provider).Inc()
|
||||
s.metrics.InflightMITMRequests.WithLabelValues(reqCtx.Provider).Inc()
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
@@ -665,13 +760,30 @@ func (s *Server) handleResponse(resp *http.Response, ctx *goproxy.ProxyCtx) *htt
|
||||
provider = reqCtx.Provider
|
||||
}
|
||||
|
||||
s.logger.Debug(s.ctx, "received response from aibridged",
|
||||
logger := s.logger.With(
|
||||
slog.F("connect_id", connectSessionID.String()),
|
||||
slog.F("request_id", requestID.String()),
|
||||
slog.F("status", resp.StatusCode),
|
||||
slog.F("provider", provider),
|
||||
slog.F("status", resp.StatusCode),
|
||||
)
|
||||
|
||||
switch {
|
||||
case resp.StatusCode >= http.StatusInternalServerError:
|
||||
logger.Error(s.ctx, "received error response from aibridged")
|
||||
case resp.StatusCode >= http.StatusBadRequest:
|
||||
logger.Warn(s.ctx, "received error response from aibridged")
|
||||
default:
|
||||
logger.Debug(s.ctx, "received response from aibridged")
|
||||
}
|
||||
|
||||
if s.metrics != nil && provider != "" {
|
||||
// Decrement inflight requests gauge now that the request is complete.
|
||||
s.metrics.InflightMITMRequests.WithLabelValues(provider).Dec()
|
||||
|
||||
// Record response by status code.
|
||||
s.metrics.MITMResponsesTotal.WithLabelValues(strconv.Itoa(resp.StatusCode), provider).Inc()
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -110,6 +111,7 @@ type testProxyConfig struct {
|
||||
aibridgeProviderFromHost func(string) string
|
||||
upstreamProxy string
|
||||
upstreamProxyCA string
|
||||
metrics *aibridgeproxyd.Metrics
|
||||
}
|
||||
|
||||
type testProxyOption func(*testProxyConfig)
|
||||
@@ -156,6 +158,12 @@ func withUpstreamProxyCA(upstreamProxyCA string) testProxyOption {
|
||||
}
|
||||
}
|
||||
|
||||
func withMetrics(metrics *aibridgeproxyd.Metrics) testProxyOption {
|
||||
return func(cfg *testProxyConfig) {
|
||||
cfg.metrics = metrics
|
||||
}
|
||||
}
|
||||
|
||||
// newTestProxy creates a new AI Bridge Proxy server for testing.
|
||||
// It uses the shared test CA and registers cleanup automatically.
|
||||
// It waits for the proxy server to be ready before returning.
|
||||
@@ -187,6 +195,7 @@ func newTestProxy(t *testing.T, opts ...testProxyOption) *aibridgeproxyd.Server
|
||||
AIBridgeProviderFromHost: cfg.aibridgeProviderFromHost,
|
||||
UpstreamProxy: cfg.upstreamProxy,
|
||||
UpstreamProxyCA: cfg.upstreamProxyCA,
|
||||
Metrics: cfg.metrics,
|
||||
}
|
||||
if cfg.certStore != nil {
|
||||
aibridgeOpts.CertStore = cfg.certStore
|
||||
@@ -569,6 +578,24 @@ func TestNew(t *testing.T) {
|
||||
require.Contains(t, err.Error(), "failed to read upstream proxy CA certificate")
|
||||
})
|
||||
|
||||
t.Run("UpstreamProxyAuthWithBothEmpty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
_, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
UpstreamProxy: "http://:@proxy.example.com:8080",
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "invalid credentials: both username and password are empty")
|
||||
})
|
||||
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -623,29 +650,162 @@ func TestNew(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
|
||||
t.Run("SuccessWithUpstreamProxyAuth", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
UpstreamProxy: "http://proxyuser:proxypass@proxy.example.com:8080",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
|
||||
t.Run("SuccessWithUpstreamProxyUsernameAuthColon", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
UpstreamProxy: "http://proxyuser:@proxy.example.com:8080",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
|
||||
t.Run("SuccessWithUpstreamProxyUsernameAuth", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
// Username only (no colon) should also succeed (password is optional)
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
UpstreamProxy: "http://proxyuser@proxy.example.com:8080",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
|
||||
t.Run("SuccessWithUpstreamProxyTokenAuth", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
UpstreamProxy: "http://:proxypass@proxy.example.com:8080",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
|
||||
t.Run("SuccessWithMetrics", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
// Create metrics instance to verify it can be passed and stored.
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := aibridgeproxyd.NewMetrics(reg)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
Metrics: metrics,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, srv)
|
||||
})
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Calling Close again should not error.
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
t.Run("WithMetrics", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Calling Close again should not error
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
certFile, keyFile := getSharedTestCA(t)
|
||||
logger := slogtest.Make(t, nil)
|
||||
|
||||
// Create metrics instance to verify Close() properly unregisters them.
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := aibridgeproxyd.NewMetrics(reg)
|
||||
|
||||
srv, err := aibridgeproxyd.New(t.Context(), logger, aibridgeproxyd.Options{
|
||||
ListenAddr: "127.0.0.1:0",
|
||||
CoderAccessURL: "http://localhost:3000",
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
DomainAllowlist: []string{aibridgeproxyd.HostAnthropic, aibridgeproxyd.HostOpenAI},
|
||||
Metrics: metrics,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify metrics were unregistered by attempting to register new metrics
|
||||
// with the same registry. This should succeed if the old metrics were
|
||||
// properly unregistered.
|
||||
newMetrics := aibridgeproxyd.NewMetrics(reg)
|
||||
require.NotNil(t, newMetrics, "should be able to create new metrics after Close() unregisters old ones")
|
||||
|
||||
// Calling Close again should not error.
|
||||
err = srv.Close()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProxy_CertCaching(t *testing.T) {
|
||||
@@ -913,6 +1073,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
buildTargetURL func(tunneledURL *url.URL) (string, error)
|
||||
tunneled bool
|
||||
expectedPath string
|
||||
provider string
|
||||
}{
|
||||
{
|
||||
name: "MitmdAnthropic",
|
||||
@@ -922,6 +1083,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
return "https://api.anthropic.com/v1/messages", nil
|
||||
},
|
||||
expectedPath: "/api/v2/aibridge/anthropic/v1/messages",
|
||||
provider: "anthropic",
|
||||
},
|
||||
{
|
||||
name: "MitmdAnthropicNonDefaultPort",
|
||||
@@ -931,6 +1093,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
return "https://api.anthropic.com:8443/v1/messages", nil
|
||||
},
|
||||
expectedPath: "/api/v2/aibridge/anthropic/v1/messages",
|
||||
provider: "anthropic",
|
||||
},
|
||||
{
|
||||
name: "MitmdOpenAI",
|
||||
@@ -940,6 +1103,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
return "https://api.openai.com/v1/chat/completions", nil
|
||||
},
|
||||
expectedPath: "/api/v2/aibridge/openai/v1/chat/completions",
|
||||
provider: "openai",
|
||||
},
|
||||
{
|
||||
name: "MitmdOpenAINonDefaultPort",
|
||||
@@ -949,6 +1113,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
return "https://api.openai.com:8443/v1/chat/completions", nil
|
||||
},
|
||||
expectedPath: "/api/v2/aibridge/openai/v1/chat/completions",
|
||||
provider: "openai",
|
||||
},
|
||||
{
|
||||
name: "TunneledUnknownHost",
|
||||
@@ -965,6 +1130,10 @@ func TestProxy_MITM(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create metrics for verification.
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := aibridgeproxyd.NewMetrics(reg)
|
||||
|
||||
// Track what aibridged receives.
|
||||
var receivedPath, receivedCoderToken, receivedRequestID string
|
||||
|
||||
@@ -1003,6 +1172,7 @@ func TestProxy_MITM(t *testing.T) {
|
||||
withDomainAllowlist(domainAllowlist...),
|
||||
// Use default provider mapping to test real AI provider routing.
|
||||
withAIBridgeProviderFromHost(nil),
|
||||
withMetrics(metrics),
|
||||
)
|
||||
|
||||
// Build the target URL:
|
||||
@@ -1036,12 +1206,25 @@ func TestProxy_MITM(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
|
||||
// Gather metrics for verification.
|
||||
gatheredMetrics, err := reg.Gather()
|
||||
require.NoError(t, err)
|
||||
|
||||
if tt.tunneled {
|
||||
// Verify request went to target server, not aibridged.
|
||||
require.Equal(t, "hello from tunneled", string(body))
|
||||
require.Empty(t, receivedPath, "aibridged should not receive tunneled requests")
|
||||
require.Empty(t, receivedCoderToken, "tunneled requests are not authenticated by the proxy")
|
||||
require.Empty(t, receivedRequestID, "tunneled requests should not have request ID header")
|
||||
|
||||
// Verify metrics for tunneled requests.
|
||||
require.True(t, testutil.PromCounterHasValue(t, gatheredMetrics, 1, "connect_sessions_total", aibridgeproxyd.RequestTypeTunneled))
|
||||
|
||||
// Verify MITM-specific metrics were not set.
|
||||
require.False(t, testutil.PromCounterGathered(t, gatheredMetrics, "connect_sessions_total", aibridgeproxyd.RequestTypeMITM))
|
||||
require.False(t, testutil.PromCounterGathered(t, gatheredMetrics, "mitm_requests_total", tt.provider))
|
||||
require.False(t, testutil.PromGaugeGathered(t, gatheredMetrics, "inflight_mitm_requests", tt.provider))
|
||||
require.False(t, testutil.PromCounterGathered(t, gatheredMetrics, "mitm_responses_total", "200", tt.provider))
|
||||
} else {
|
||||
// Verify the request was routed to aibridged correctly.
|
||||
require.Equal(t, "hello from aibridged", string(body))
|
||||
@@ -1050,6 +1233,15 @@ func TestProxy_MITM(t *testing.T) {
|
||||
require.NotEmpty(t, receivedRequestID, "MITM'd requests must include request ID header")
|
||||
_, err := uuid.Parse(receivedRequestID)
|
||||
require.NoError(t, err, "request ID must be a valid UUID")
|
||||
|
||||
// Verify metrics for MITM requests.
|
||||
require.True(t, testutil.PromCounterHasValue(t, gatheredMetrics, 1, "connect_sessions_total", aibridgeproxyd.RequestTypeMITM))
|
||||
require.True(t, testutil.PromCounterHasValue(t, gatheredMetrics, 1, "mitm_requests_total", tt.provider))
|
||||
require.True(t, testutil.PromGaugeHasValue(t, gatheredMetrics, 0, "inflight_mitm_requests", tt.provider))
|
||||
require.True(t, testutil.PromCounterHasValue(t, gatheredMetrics, 1, "mitm_responses_total", "200", tt.provider))
|
||||
|
||||
// Verify tunneled counter was not set.
|
||||
require.False(t, testutil.PromCounterGathered(t, gatheredMetrics, "connect_sessions_total", aibridgeproxyd.RequestTypeTunneled))
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1193,6 +1385,9 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
buildTargetURL func(finalDestinationURL *url.URL) string
|
||||
// expectedAIBridgePath is the path aibridge should receive for MITM requests.
|
||||
expectedAIBridgePath string
|
||||
// upstreamProxyAuth is optional "user:pass" credentials for the upstream proxy.
|
||||
// If set, the test verifies the Proxy-Authorization header is sent correctly.
|
||||
upstreamProxyAuth string
|
||||
}{
|
||||
{
|
||||
name: "NonAllowlistedDomain_TunneledToHTTPUpstreamProxy",
|
||||
@@ -1210,6 +1405,42 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
return fmt.Sprintf("https://%s/tunneled-path", finalDestinationURL.Host)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NonAllowlistedDomain_TunneledToHTTPUpstreamProxyWithAuth",
|
||||
tunneled: true,
|
||||
upstreamProxyTLS: false,
|
||||
upstreamProxyAuth: "proxyuser:proxypass",
|
||||
buildTargetURL: func(finalDestinationURL *url.URL) string {
|
||||
return fmt.Sprintf("https://%s/tunneled-path", finalDestinationURL.Host)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NonAllowlistedDomain_TunneledToHTTPUpstreamProxyWithUsernameOnly",
|
||||
tunneled: true,
|
||||
upstreamProxyTLS: false,
|
||||
upstreamProxyAuth: "proxyuser",
|
||||
buildTargetURL: func(finalDestinationURL *url.URL) string {
|
||||
return fmt.Sprintf("https://%s/tunneled-path", finalDestinationURL.Host)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NonAllowlistedDomain_TunneledToHTTPUpstreamProxyWithUsernameAndColon",
|
||||
tunneled: true,
|
||||
upstreamProxyTLS: false,
|
||||
upstreamProxyAuth: "proxyuser:",
|
||||
buildTargetURL: func(finalDestinationURL *url.URL) string {
|
||||
return fmt.Sprintf("https://%s/tunneled-path", finalDestinationURL.Host)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NonAllowlistedDomain_TunneledToHTTPUpstreamProxyWithTokenAuth",
|
||||
tunneled: true,
|
||||
upstreamProxyTLS: false,
|
||||
upstreamProxyAuth: ":proxypass",
|
||||
buildTargetURL: func(finalDestinationURL *url.URL) string {
|
||||
return fmt.Sprintf("https://%s/tunneled-path", finalDestinationURL.Host)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AllowlistedDomain_MITMByAIProxy",
|
||||
tunneled: false,
|
||||
@@ -1229,6 +1460,7 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
var (
|
||||
upstreamProxyCONNECTReceived bool
|
||||
upstreamProxyCONNECTHost string
|
||||
upstreamProxyAuthHeader string
|
||||
finalDestinationReceived bool
|
||||
finalDestinationPath string
|
||||
finalDestinationBody string
|
||||
@@ -1263,6 +1495,7 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
|
||||
upstreamProxyCONNECTReceived = true
|
||||
upstreamProxyCONNECTHost = r.Host
|
||||
upstreamProxyAuthHeader = r.Header.Get("Proxy-Authorization")
|
||||
|
||||
// Connect to the mock final destination server.
|
||||
targetConn, err := net.Dial("tcp", finalDestinationURL.Host)
|
||||
@@ -1352,11 +1585,19 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
// - For MITM, api.anthropic.com must be in the allowlist.
|
||||
domainAllowlist := []string{aibridgeproxyd.HostAnthropic}
|
||||
|
||||
// Build upstream proxy URL with optional auth credentials.
|
||||
upstreamProxyURLStr := upstreamProxy.URL
|
||||
if tt.upstreamProxyAuth != "" {
|
||||
parsed, err := url.Parse(upstreamProxy.URL)
|
||||
require.NoError(t, err)
|
||||
upstreamProxyURLStr = fmt.Sprintf("%s://%s@%s", parsed.Scheme, tt.upstreamProxyAuth, parsed.Host)
|
||||
}
|
||||
|
||||
// Create aiproxy with upstream proxy configured.
|
||||
proxyOpts := []testProxyOption{
|
||||
withCoderAccessURL(aibridgeServer.URL),
|
||||
withDomainAllowlist(domainAllowlist...),
|
||||
withUpstreamProxy(upstreamProxy.URL),
|
||||
withUpstreamProxy(upstreamProxyURLStr),
|
||||
withAllowedPorts("80", "443", parsedTargetURL.Port()),
|
||||
// Use default provider mapping to test real AI provider routing.
|
||||
withAIBridgeProviderFromHost(nil),
|
||||
@@ -1422,6 +1663,13 @@ func TestUpstreamProxy(t *testing.T) {
|
||||
require.False(t, finalDestinationReceived,
|
||||
"final destination should NOT receive request for allowlisted domain")
|
||||
}
|
||||
|
||||
// Verify upstream proxy authentication if configured.
|
||||
if tt.upstreamProxyAuth != "" {
|
||||
expectedAuth := "Basic " + base64.StdEncoding.EncodeToString([]byte(tt.upstreamProxyAuth))
|
||||
require.Equal(t, expectedAuth, upstreamProxyAuthHeader,
|
||||
"Proxy-Authorization header should contain correct credentials")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
package aibridgeproxyd
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
const (
|
||||
RequestTypeMITM = "mitm"
|
||||
RequestTypeTunneled = "tunneled"
|
||||
)
|
||||
|
||||
// Metrics holds all prometheus metrics for aibridgeproxyd.
|
||||
type Metrics struct {
|
||||
registerer prometheus.Registerer
|
||||
|
||||
// ConnectSessionsTotal counts CONNECT sessions established.
|
||||
// Labels: type (mitm/tunneled)
|
||||
ConnectSessionsTotal *prometheus.CounterVec
|
||||
|
||||
// MITMRequestsTotal counts MITM requests handled by the proxy.
|
||||
// Labels: provider
|
||||
MITMRequestsTotal *prometheus.CounterVec
|
||||
|
||||
// InflightMITMRequests tracks the number of MITM requests currently being processed.
|
||||
// Labels: provider
|
||||
InflightMITMRequests *prometheus.GaugeVec
|
||||
|
||||
// MITMResponsesTotal counts MITM responses by HTTP status code.
|
||||
// Labels: code (HTTP status code), provider
|
||||
// Cardinality is bounded: ~100 used status codes x few providers.
|
||||
MITMResponsesTotal *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewMetrics creates and registers all metrics for aibridgeproxyd.
|
||||
func NewMetrics(reg prometheus.Registerer) *Metrics {
|
||||
factory := promauto.With(reg)
|
||||
|
||||
return &Metrics{
|
||||
registerer: reg,
|
||||
|
||||
ConnectSessionsTotal: factory.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "connect_sessions_total",
|
||||
Help: "Total number of CONNECT sessions established.",
|
||||
}, []string{"type"}),
|
||||
|
||||
MITMRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "mitm_requests_total",
|
||||
Help: "Total number of MITM requests handled by the proxy.",
|
||||
}, []string{"provider"}),
|
||||
|
||||
InflightMITMRequests: factory.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "inflight_mitm_requests",
|
||||
Help: "Number of MITM requests currently being processed.",
|
||||
}, []string{"provider"}),
|
||||
|
||||
MITMResponsesTotal: factory.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "mitm_responses_total",
|
||||
Help: "Total number of MITM responses by HTTP status code class.",
|
||||
}, []string{"code", "provider"}),
|
||||
}
|
||||
}
|
||||
|
||||
// Unregister removes all metrics from the registerer.
|
||||
func (m *Metrics) Unregister() {
|
||||
m.registerer.Unregister(m.ConnectSessionsTotal)
|
||||
m.registerer.Unregister(m.MITMRequestsTotal)
|
||||
m.registerer.Unregister(m.InflightMITMRequests)
|
||||
m.registerer.Unregister(m.MITMResponsesTotal)
|
||||
}
|
||||
@@ -5,6 +5,7 @@ package cli
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/enterprise/aibridgeproxyd"
|
||||
@@ -17,6 +18,9 @@ func newAIBridgeProxyDaemon(coderAPI *coderd.API) (*aibridgeproxyd.Server, error
|
||||
|
||||
logger := coderAPI.Logger.Named("aibridgeproxyd")
|
||||
|
||||
reg := prometheus.WrapRegistererWithPrefix("coder_aibridgeproxyd_", coderAPI.PrometheusRegistry)
|
||||
metrics := aibridgeproxyd.NewMetrics(reg)
|
||||
|
||||
srv, err := aibridgeproxyd.New(ctx, logger, aibridgeproxyd.Options{
|
||||
ListenAddr: coderAPI.DeploymentValues.AI.BridgeProxyConfig.ListenAddr.String(),
|
||||
CoderAccessURL: coderAPI.AccessURL.String(),
|
||||
@@ -25,6 +29,7 @@ func newAIBridgeProxyDaemon(coderAPI *coderd.API) (*aibridgeproxyd.Server, error
|
||||
DomainAllowlist: coderAPI.DeploymentValues.AI.BridgeProxyConfig.DomainAllowlist.Value(),
|
||||
UpstreamProxy: coderAPI.DeploymentValues.AI.BridgeProxyConfig.UpstreamProxy.String(),
|
||||
UpstreamProxyCA: coderAPI.DeploymentValues.AI.BridgeProxyConfig.UpstreamProxyCA.String(),
|
||||
Metrics: metrics,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to start in-memory aibridgeproxy daemon: %w", err)
|
||||
|
||||
@@ -371,7 +371,7 @@ func TestEnterpriseCreateWithPreset(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Given: a template and a template version where the preset defines values for all required parameters,
|
||||
@@ -484,7 +484,7 @@ func TestEnterpriseCreateWithPreset(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Given: a template and a template version where the preset defines values for all required parameters,
|
||||
|
||||
@@ -39,40 +39,44 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
// Always generate a mesh key, even if the built-in DERP server is
|
||||
// disabled. This mesh key is still used by workspace proxies running
|
||||
// HA.
|
||||
var meshKey string
|
||||
err := options.Database.InTx(func(tx database.Store) error {
|
||||
// This will block until the lock is acquired, and will be
|
||||
// automatically released when the transaction ends.
|
||||
err := tx.AcquireLock(ctx, database.LockIDEnterpriseDeploymentSetup)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("acquire lock: %w", err)
|
||||
}
|
||||
|
||||
meshKey, err = tx.GetDERPMeshKey(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !errors.Is(err, sql.ErrNoRows) {
|
||||
return xerrors.Errorf("get DERP mesh key: %w", err)
|
||||
}
|
||||
meshKey, err = cryptorand.String(32)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("generate DERP mesh key: %w", err)
|
||||
}
|
||||
err = tx.InsertDERPMeshKey(ctx, meshKey)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("insert DERP mesh key: %w", err)
|
||||
}
|
||||
return nil
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if meshKey == "" {
|
||||
return nil, nil, xerrors.New("mesh key is empty")
|
||||
}
|
||||
|
||||
if options.DeploymentValues.DERP.Server.Enable {
|
||||
options.DERPServer = derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp")))
|
||||
var meshKey string
|
||||
err := options.Database.InTx(func(tx database.Store) error {
|
||||
// This will block until the lock is acquired, and will be
|
||||
// automatically released when the transaction ends.
|
||||
err := tx.AcquireLock(ctx, database.LockIDEnterpriseDeploymentSetup)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("acquire lock: %w", err)
|
||||
}
|
||||
|
||||
meshKey, err = tx.GetDERPMeshKey(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !errors.Is(err, sql.ErrNoRows) {
|
||||
return xerrors.Errorf("get DERP mesh key: %w", err)
|
||||
}
|
||||
meshKey, err = cryptorand.String(32)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("generate DERP mesh key: %w", err)
|
||||
}
|
||||
err = tx.InsertDERPMeshKey(ctx, meshKey)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("insert DERP mesh key: %w", err)
|
||||
}
|
||||
return nil
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if meshKey == "" {
|
||||
return nil, nil, xerrors.New("mesh key is empty")
|
||||
}
|
||||
options.DERPServer.SetMeshKey(meshKey)
|
||||
}
|
||||
|
||||
|
||||
@@ -981,7 +981,13 @@ func (api *API) updateEntitlements(ctx context.Context) error {
|
||||
|
||||
var _ wsbuilder.UsageChecker = &API{}
|
||||
|
||||
func (api *API) CheckBuildUsage(ctx context.Context, store database.Store, templateVersion *database.TemplateVersion, task *database.Task, transition database.WorkspaceTransition) (wsbuilder.UsageCheckResponse, error) {
|
||||
func (api *API) CheckBuildUsage(
|
||||
_ context.Context,
|
||||
_ database.Store,
|
||||
templateVersion *database.TemplateVersion,
|
||||
task *database.Task,
|
||||
transition database.WorkspaceTransition,
|
||||
) (wsbuilder.UsageCheckResponse, error) {
|
||||
// If the template version has an external agent, we need to check that the
|
||||
// license is entitled to this feature.
|
||||
if templateVersion.HasExternalAgent.Valid && templateVersion.HasExternalAgent.Bool {
|
||||
@@ -994,59 +1000,23 @@ func (api *API) CheckBuildUsage(ctx context.Context, store database.Store, templ
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := api.checkAIBuildUsage(ctx, store, task, transition)
|
||||
if err != nil {
|
||||
return wsbuilder.UsageCheckResponse{}, err
|
||||
}
|
||||
if !resp.Permitted {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
return wsbuilder.UsageCheckResponse{Permitted: true}, nil
|
||||
}
|
||||
|
||||
// checkAIBuildUsage validates AI-related usage constraints. It is a no-op
|
||||
// unless the transition is "start" and the template version has an AI task.
|
||||
func (api *API) checkAIBuildUsage(ctx context.Context, store database.Store, task *database.Task, transition database.WorkspaceTransition) (wsbuilder.UsageCheckResponse, error) {
|
||||
// Only check AI usage rules for start transitions.
|
||||
if transition != database.WorkspaceTransitionStart {
|
||||
// Verify managed agent entitlement for AI task builds.
|
||||
// The count/limit check is intentionally omitted — breaching the
|
||||
// limit is advisory only and surfaced as a warning via entitlements.
|
||||
if transition != database.WorkspaceTransitionStart || task == nil {
|
||||
return wsbuilder.UsageCheckResponse{Permitted: true}, nil
|
||||
}
|
||||
|
||||
// If the template version doesn't have an AI task, we don't need to check usage.
|
||||
if task == nil {
|
||||
if !api.Entitlements.HasLicense() {
|
||||
return wsbuilder.UsageCheckResponse{Permitted: true}, nil
|
||||
}
|
||||
|
||||
// When licensed, ensure we haven't breached the managed agent limit.
|
||||
// Unlicensed deployments are allowed to use unlimited managed agents.
|
||||
if api.Entitlements.HasLicense() {
|
||||
managedAgentLimit, ok := api.Entitlements.Feature(codersdk.FeatureManagedAgentLimit)
|
||||
if !ok || !managedAgentLimit.Enabled || managedAgentLimit.Limit == nil || managedAgentLimit.UsagePeriod == nil {
|
||||
return wsbuilder.UsageCheckResponse{
|
||||
Permitted: false,
|
||||
Message: "Your license is not entitled to managed agents. Please contact sales to continue using managed agents.",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// This check is intentionally not committed to the database. It's fine
|
||||
// if it's not 100% accurate or allows for minor breaches due to build
|
||||
// races.
|
||||
// nolint:gocritic // Requires permission to read all usage events.
|
||||
managedAgentCount, err := store.GetTotalUsageDCManagedAgentsV1(agpldbauthz.AsSystemRestricted(ctx), database.GetTotalUsageDCManagedAgentsV1Params{
|
||||
StartDate: managedAgentLimit.UsagePeriod.Start,
|
||||
EndDate: managedAgentLimit.UsagePeriod.End,
|
||||
})
|
||||
if err != nil {
|
||||
return wsbuilder.UsageCheckResponse{}, xerrors.Errorf("get managed agent count: %w", err)
|
||||
}
|
||||
|
||||
if managedAgentCount >= *managedAgentLimit.Limit {
|
||||
return wsbuilder.UsageCheckResponse{
|
||||
Permitted: false,
|
||||
Message: "You have breached the managed agent limit in your license. Please contact sales to continue using managed agents.",
|
||||
}, nil
|
||||
}
|
||||
managedAgentLimit, ok := api.Entitlements.Feature(codersdk.FeatureManagedAgentLimit)
|
||||
if !ok || !managedAgentLimit.Enabled {
|
||||
return wsbuilder.UsageCheckResponse{
|
||||
Permitted: false,
|
||||
Message: "Your license is not entitled to managed agents. Please contact sales to continue using managed agents.",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return wsbuilder.UsageCheckResponse{Permitted: true}, nil
|
||||
@@ -1330,5 +1300,5 @@ func (api *API) setupPrebuilds(featureEnabled bool) (agplprebuilds.Reconciliatio
|
||||
api.TracerProvider,
|
||||
int(api.DeploymentValues.PostgresConnMaxOpen.Value()),
|
||||
)
|
||||
return reconciler, prebuilds.NewEnterpriseClaimer(api.Database)
|
||||
return reconciler, prebuilds.NewEnterpriseClaimer()
|
||||
}
|
||||
|
||||
@@ -633,7 +633,7 @@ func TestManagedAgentLimit(t *testing.T) {
|
||||
// expiry warnings.
|
||||
GraceAt: time.Now().Add(time.Hour * 24 * 60),
|
||||
ExpiresAt: time.Now().Add(time.Hour * 24 * 90),
|
||||
}).ManagedAgentLimit(1, 1),
|
||||
}).ManagedAgentLimit(1),
|
||||
})
|
||||
|
||||
// Get entitlements to check that the license is a-ok.
|
||||
@@ -644,11 +644,7 @@ func TestManagedAgentLimit(t *testing.T) {
|
||||
require.True(t, agentLimit.Enabled)
|
||||
require.NotNil(t, agentLimit.Limit)
|
||||
require.EqualValues(t, 1, *agentLimit.Limit)
|
||||
require.NotNil(t, agentLimit.SoftLimit)
|
||||
require.EqualValues(t, 1, *agentLimit.SoftLimit)
|
||||
require.Empty(t, sdkEntitlements.Errors)
|
||||
// There should be a warning since we're really close to our agent limit.
|
||||
require.Equal(t, sdkEntitlements.Warnings[0], "You are approaching the managed agent limit in your license. Please refer to the Deployment Licenses page for more information.")
|
||||
|
||||
// Create a fake provision response that claims there are agents in the
|
||||
// template and every built workspace.
|
||||
@@ -720,27 +716,32 @@ func TestManagedAgentLimit(t *testing.T) {
|
||||
require.NoError(t, err, "fetching AI workspace must succeed")
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, cli, workspace.LatestBuild.ID)
|
||||
|
||||
// Create a second AI workspace, which should fail.
|
||||
_, err = cli.CreateTask(ctx, owner.UserID.String(), codersdk.CreateTaskRequest{
|
||||
// Create a second AI task, which should succeed even though the limit is
|
||||
// breached. Managed agent limits are advisory only and should never block
|
||||
// workspace creation.
|
||||
task2, err := cli.CreateTask(ctx, owner.UserID.String(), codersdk.CreateTaskRequest{
|
||||
Name: namesgenerator.UniqueNameWith("-"),
|
||||
TemplateVersionID: aiTemplate.ActiveVersionID,
|
||||
TemplateVersionPresetID: uuid.Nil,
|
||||
Input: "hi",
|
||||
DisplayName: namesgenerator.UniqueName(),
|
||||
})
|
||||
require.ErrorContains(t, err, "You have breached the managed agent limit in your license")
|
||||
require.NoError(t, err, "creating task beyond managed agent limit must succeed")
|
||||
workspace2, err := cli.Workspace(ctx, task2.WorkspaceID.UUID)
|
||||
require.NoError(t, err, "fetching AI workspace must succeed")
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, cli, workspace2.LatestBuild.ID)
|
||||
|
||||
// Create a third non-AI workspace, which should succeed.
|
||||
workspace = coderdtest.CreateWorkspace(t, cli, noAiTemplate.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, cli, workspace.LatestBuild.ID)
|
||||
}
|
||||
|
||||
func TestCheckBuildUsage_SkipsAIForNonStartTransitions(t *testing.T) {
|
||||
func TestCheckBuildUsage_NeverBlocksOnManagedAgentLimit(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
// Prepare entitlements with a managed agent limit to enforce.
|
||||
// Prepare entitlements with a managed agent limit.
|
||||
entSet := entitlements.New()
|
||||
entSet.Modify(func(e *codersdk.Entitlements) {
|
||||
e.HasLicense = true
|
||||
@@ -776,32 +777,111 @@ func TestCheckBuildUsage_SkipsAIForNonStartTransitions(t *testing.T) {
|
||||
TemplateVersionID: tv.ID,
|
||||
}
|
||||
|
||||
// Mock DB: expect exactly one count call for the "start" transition.
|
||||
// Mock DB: no calls expected since managed agent limits are
|
||||
// advisory only and no longer query the database at build time.
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
mDB.EXPECT().
|
||||
GetTotalUsageDCManagedAgentsV1(gomock.Any(), gomock.Any()).
|
||||
Times(1).
|
||||
Return(int64(1), nil) // equal to limit -> should breach
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Start transition: should be not permitted due to limit breach.
|
||||
// Start transition: should be permitted even though the limit is
|
||||
// breached. Managed agent limits are advisory only.
|
||||
startResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionStart)
|
||||
require.NoError(t, err)
|
||||
require.False(t, startResp.Permitted)
|
||||
require.Contains(t, startResp.Message, "breached the managed agent limit")
|
||||
require.True(t, startResp.Permitted)
|
||||
|
||||
// Stop transition: should be permitted and must not trigger additional DB calls.
|
||||
// Stop transition: should also be permitted.
|
||||
stopResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionStop)
|
||||
require.NoError(t, err)
|
||||
require.True(t, stopResp.Permitted)
|
||||
|
||||
// Delete transition: should be permitted and must not trigger additional DB calls.
|
||||
// Delete transition: should also be permitted.
|
||||
deleteResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionDelete)
|
||||
require.NoError(t, err)
|
||||
require.True(t, deleteResp.Permitted)
|
||||
}
|
||||
|
||||
func TestCheckBuildUsage_BlocksWithoutManagedAgentEntitlement(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tv := &database.TemplateVersion{
|
||||
HasAITask: sql.NullBool{Valid: true, Bool: true},
|
||||
HasExternalAgent: sql.NullBool{Valid: true, Bool: false},
|
||||
}
|
||||
task := &database.Task{
|
||||
TemplateVersionID: tv.ID,
|
||||
}
|
||||
|
||||
// Both "feature absent" and "feature explicitly disabled" should
|
||||
// block AI task builds on licensed deployments.
|
||||
tests := []struct {
|
||||
name string
|
||||
setupEnts func(e *codersdk.Entitlements)
|
||||
}{
|
||||
{
|
||||
name: "FeatureAbsent",
|
||||
setupEnts: func(e *codersdk.Entitlements) {
|
||||
e.HasLicense = true
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "FeatureDisabled",
|
||||
setupEnts: func(e *codersdk.Entitlements) {
|
||||
e.HasLicense = true
|
||||
e.Features[codersdk.FeatureManagedAgentLimit] = codersdk.Feature{
|
||||
Enabled: false,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
entSet := entitlements.New()
|
||||
entSet.Modify(tc.setupEnts)
|
||||
|
||||
agpl := &agplcoderd.API{
|
||||
Options: &agplcoderd.Options{
|
||||
Entitlements: entSet,
|
||||
},
|
||||
}
|
||||
eapi := &coderd.API{
|
||||
AGPL: agpl,
|
||||
Options: &coderd.Options{Options: agpl.Options},
|
||||
}
|
||||
|
||||
mDB := dbmock.NewMockStore(ctrl)
|
||||
ctx := context.Background()
|
||||
|
||||
// Start transition with a task: should be blocked because the
|
||||
// license doesn't include the managed agent entitlement.
|
||||
resp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionStart)
|
||||
require.NoError(t, err)
|
||||
require.False(t, resp.Permitted)
|
||||
require.Contains(t, resp.Message, "not entitled to managed agents")
|
||||
|
||||
// Stop and delete transitions should still be permitted so
|
||||
// that existing workspaces can be stopped/cleaned up.
|
||||
stopResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionStop)
|
||||
require.NoError(t, err)
|
||||
require.True(t, stopResp.Permitted)
|
||||
|
||||
deleteResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, task, database.WorkspaceTransitionDelete)
|
||||
require.NoError(t, err)
|
||||
require.True(t, deleteResp.Permitted)
|
||||
|
||||
// Start transition without a task: should be permitted (not
|
||||
// an AI task build, so the entitlement check doesn't apply).
|
||||
noTaskResp, err := eapi.CheckBuildUsage(ctx, mDB, tv, nil, database.WorkspaceTransitionStart)
|
||||
require.NoError(t, err)
|
||||
require.True(t, noTaskResp.Permitted)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testDBAuthzRole returns a context with a subject that has a role
|
||||
// with permissions required for test setup.
|
||||
func testDBAuthzRole(ctx context.Context) context.Context {
|
||||
|
||||
@@ -231,12 +231,8 @@ func (opts *LicenseOptions) AIGovernanceAddon(limit int64) *LicenseOptions {
|
||||
return opts.Feature(codersdk.FeatureAIGovernanceUserLimit, limit)
|
||||
}
|
||||
|
||||
func (opts *LicenseOptions) ManagedAgentLimit(soft int64, hard int64) *LicenseOptions {
|
||||
// These don't use named or exported feature names, see
|
||||
// enterprise/coderd/license/license.go.
|
||||
opts = opts.Feature(codersdk.FeatureName("managed_agent_limit_soft"), soft)
|
||||
opts = opts.Feature(codersdk.FeatureName("managed_agent_limit_hard"), hard)
|
||||
return opts
|
||||
func (opts *LicenseOptions) ManagedAgentLimit(limit int64) *LicenseOptions {
|
||||
return opts.Feature(codersdk.FeatureManagedAgentLimit, limit)
|
||||
}
|
||||
|
||||
func (opts *LicenseOptions) Feature(name codersdk.FeatureName, value int64) *LicenseOptions {
|
||||
|
||||
@@ -15,60 +15,9 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
const (
|
||||
// These features are only included in the license and are not actually
|
||||
// entitlements after the licenses are processed. These values will be
|
||||
// merged into the codersdk.FeatureManagedAgentLimit feature.
|
||||
//
|
||||
// The reason we need two separate features is because the License v3 format
|
||||
// uses map[string]int64 for features, so we're unable to use a single value
|
||||
// with a struct like `{"soft": 100, "hard": 200}`. This is unfortunate and
|
||||
// we should fix this with a new license format v4 in the future.
|
||||
//
|
||||
// These are intentionally not exported as they should not be used outside
|
||||
// of this package (except tests).
|
||||
featureManagedAgentLimitHard codersdk.FeatureName = "managed_agent_limit_hard"
|
||||
featureManagedAgentLimitSoft codersdk.FeatureName = "managed_agent_limit_soft"
|
||||
)
|
||||
|
||||
var (
|
||||
// Mapping of license feature names to the SDK feature name.
|
||||
// This is used to map from multiple usage period features into a single SDK
|
||||
// feature.
|
||||
featureGrouping = map[codersdk.FeatureName]struct {
|
||||
// The parent feature.
|
||||
sdkFeature codersdk.FeatureName
|
||||
// Whether the value of the license feature is the soft limit or the hard
|
||||
// limit.
|
||||
isSoft bool
|
||||
}{
|
||||
// Map featureManagedAgentLimitHard and featureManagedAgentLimitSoft to
|
||||
// codersdk.FeatureManagedAgentLimit.
|
||||
featureManagedAgentLimitHard: {
|
||||
sdkFeature: codersdk.FeatureManagedAgentLimit,
|
||||
isSoft: false,
|
||||
},
|
||||
featureManagedAgentLimitSoft: {
|
||||
sdkFeature: codersdk.FeatureManagedAgentLimit,
|
||||
isSoft: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Features that are forbidden to be set in a license. These are the SDK
|
||||
// features in the usagedBasedFeatureGrouping map.
|
||||
licenseForbiddenFeatures = func() map[codersdk.FeatureName]struct{} {
|
||||
features := make(map[codersdk.FeatureName]struct{})
|
||||
for _, feature := range featureGrouping {
|
||||
features[feature.sdkFeature] = struct{}{}
|
||||
}
|
||||
return features
|
||||
}()
|
||||
)
|
||||
|
||||
// Entitlements processes licenses to return whether features are enabled or not.
|
||||
// TODO(@deansheather): This function and the related LicensesEntitlements
|
||||
// function should be refactored into smaller functions that:
|
||||
@@ -280,17 +229,15 @@ func LicensesEntitlements(
|
||||
// licenses with the corresponding features actually set
|
||||
// trump this default entitlement, even if they are set to a
|
||||
// smaller value.
|
||||
defaultManagedAgentsIsuedAt = time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC)
|
||||
defaultManagedAgentsStart = defaultManagedAgentsIsuedAt
|
||||
defaultManagedAgentsEnd = defaultManagedAgentsStart.AddDate(100, 0, 0)
|
||||
defaultManagedAgentsSoftLimit int64 = 1000
|
||||
defaultManagedAgentsHardLimit int64 = 1000
|
||||
defaultManagedAgentsIsuedAt = time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC)
|
||||
defaultManagedAgentsStart = defaultManagedAgentsIsuedAt
|
||||
defaultManagedAgentsEnd = defaultManagedAgentsStart.AddDate(100, 0, 0)
|
||||
defaultManagedAgentsLimit int64 = 1000
|
||||
)
|
||||
entitlements.AddFeature(codersdk.FeatureManagedAgentLimit, codersdk.Feature{
|
||||
Enabled: true,
|
||||
Entitlement: entitlement,
|
||||
SoftLimit: &defaultManagedAgentsSoftLimit,
|
||||
Limit: &defaultManagedAgentsHardLimit,
|
||||
Limit: &defaultManagedAgentsLimit,
|
||||
UsagePeriod: &codersdk.UsagePeriod{
|
||||
IssuedAt: defaultManagedAgentsIsuedAt,
|
||||
Start: defaultManagedAgentsStart,
|
||||
@@ -310,15 +257,6 @@ func LicensesEntitlements(
|
||||
|
||||
// Add all features from the feature set.
|
||||
for _, featureName := range claims.FeatureSet.Features() {
|
||||
if _, ok := licenseForbiddenFeatures[featureName]; ok {
|
||||
// Ignore any FeatureSet features that are forbidden to be set in a license.
|
||||
continue
|
||||
}
|
||||
if _, ok := featureGrouping[featureName]; ok {
|
||||
// These features need very special handling due to merging
|
||||
// multiple feature values into a single SDK feature.
|
||||
continue
|
||||
}
|
||||
if featureName.UsesLimit() || featureName.UsesUsagePeriod() {
|
||||
// Limit and usage period features are handled below.
|
||||
// They don't provide default values as they are always enabled
|
||||
@@ -335,30 +273,24 @@ func LicensesEntitlements(
|
||||
})
|
||||
}
|
||||
|
||||
// A map of SDK feature name to the uncommitted usage feature.
|
||||
uncommittedUsageFeatures := map[codersdk.FeatureName]usageLimit{}
|
||||
|
||||
// Features al-la-carte
|
||||
for featureName, featureValue := range claims.Features {
|
||||
if _, ok := licenseForbiddenFeatures[featureName]; ok {
|
||||
entitlements.Errors = append(entitlements.Errors,
|
||||
fmt.Sprintf("Feature %s is forbidden to be set in a license.", featureName))
|
||||
continue
|
||||
// Old-style licenses encode the managed agent limit as
|
||||
// separate soft/hard features.
|
||||
//
|
||||
// This could be removed in a future release, but can only be
|
||||
// done once all old licenses containing this are no longer in use.
|
||||
if featureName == "managed_agent_limit_soft" {
|
||||
// Maps the soft limit to the canonical feature name
|
||||
featureName = codersdk.FeatureManagedAgentLimit
|
||||
}
|
||||
if featureValue < 0 {
|
||||
// We currently don't use negative values for features.
|
||||
if featureName == "managed_agent_limit_hard" {
|
||||
// We can safely ignore the hard limit as it is no longer used.
|
||||
continue
|
||||
}
|
||||
|
||||
// Special handling for grouped (e.g. usage period) features.
|
||||
if grouping, ok := featureGrouping[featureName]; ok {
|
||||
ul := uncommittedUsageFeatures[grouping.sdkFeature]
|
||||
if grouping.isSoft {
|
||||
ul.Soft = &featureValue
|
||||
} else {
|
||||
ul.Hard = &featureValue
|
||||
}
|
||||
uncommittedUsageFeatures[grouping.sdkFeature] = ul
|
||||
if featureValue < 0 {
|
||||
// We currently don't use negative values for features.
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -372,6 +304,17 @@ func LicensesEntitlements(
|
||||
|
||||
// Handling for limit features.
|
||||
switch {
|
||||
case featureName.UsesUsagePeriod():
|
||||
entitlements.AddFeature(featureName, codersdk.Feature{
|
||||
Enabled: featureValue > 0,
|
||||
Entitlement: entitlement,
|
||||
Limit: &featureValue,
|
||||
UsagePeriod: &codersdk.UsagePeriod{
|
||||
IssuedAt: claims.IssuedAt.Time,
|
||||
Start: usagePeriodStart,
|
||||
End: usagePeriodEnd,
|
||||
},
|
||||
})
|
||||
case featureName.UsesLimit():
|
||||
if featureValue <= 0 {
|
||||
// 0 limit value or less doesn't make sense, so we skip it.
|
||||
@@ -402,46 +345,6 @@ func LicensesEntitlements(
|
||||
}
|
||||
}
|
||||
|
||||
// Apply uncommitted usage features to the entitlements.
|
||||
for featureName, ul := range uncommittedUsageFeatures {
|
||||
if ul.Soft == nil || ul.Hard == nil {
|
||||
// Invalid license.
|
||||
entitlements.Errors = append(entitlements.Errors,
|
||||
fmt.Sprintf("Invalid license (%s): feature %s has missing soft or hard limit values", license.UUID.String(), featureName))
|
||||
continue
|
||||
}
|
||||
if *ul.Hard < *ul.Soft {
|
||||
entitlements.Errors = append(entitlements.Errors,
|
||||
fmt.Sprintf("Invalid license (%s): feature %s has a hard limit less than the soft limit", license.UUID.String(), featureName))
|
||||
continue
|
||||
}
|
||||
if *ul.Hard < 0 || *ul.Soft < 0 {
|
||||
entitlements.Errors = append(entitlements.Errors,
|
||||
fmt.Sprintf("Invalid license (%s): feature %s has a soft or hard limit less than 0", license.UUID.String(), featureName))
|
||||
continue
|
||||
}
|
||||
|
||||
feature := codersdk.Feature{
|
||||
Enabled: true,
|
||||
Entitlement: entitlement,
|
||||
SoftLimit: ul.Soft,
|
||||
Limit: ul.Hard,
|
||||
// `Actual` will be populated below when warnings are generated.
|
||||
UsagePeriod: &codersdk.UsagePeriod{
|
||||
IssuedAt: claims.IssuedAt.Time,
|
||||
Start: usagePeriodStart,
|
||||
End: usagePeriodEnd,
|
||||
},
|
||||
}
|
||||
// If the hard limit is 0, the feature is disabled.
|
||||
if *ul.Hard <= 0 {
|
||||
feature.Enabled = false
|
||||
feature.SoftLimit = ptr.Ref(int64(0))
|
||||
feature.Limit = ptr.Ref(int64(0))
|
||||
}
|
||||
entitlements.AddFeature(featureName, feature)
|
||||
}
|
||||
|
||||
addonFeatures := make(map[codersdk.FeatureName]codersdk.Feature)
|
||||
|
||||
// Finally, add all features from the addons. We do this last so that
|
||||
@@ -557,32 +460,9 @@ func LicensesEntitlements(
|
||||
entitlements.AddFeature(codersdk.FeatureManagedAgentLimit, agentLimit)
|
||||
|
||||
// Only issue warnings if the feature is enabled.
|
||||
if agentLimit.Enabled {
|
||||
var softLimit int64
|
||||
if agentLimit.SoftLimit != nil {
|
||||
softLimit = *agentLimit.SoftLimit
|
||||
}
|
||||
var hardLimit int64
|
||||
if agentLimit.Limit != nil {
|
||||
hardLimit = *agentLimit.Limit
|
||||
}
|
||||
|
||||
// Issue a warning early:
|
||||
// 1. If the soft limit and hard limit are equal, at 75% of the hard
|
||||
// limit.
|
||||
// 2. If the limit is greater than the soft limit, at 75% of the
|
||||
// difference between the hard limit and the soft limit.
|
||||
softWarningThreshold := int64(float64(hardLimit) * 0.75)
|
||||
if hardLimit > softLimit && softLimit > 0 {
|
||||
softWarningThreshold = softLimit + int64(float64(hardLimit-softLimit)*0.75)
|
||||
}
|
||||
if managedAgentCount >= *agentLimit.Limit {
|
||||
entitlements.Warnings = append(entitlements.Warnings,
|
||||
"You have built more workspaces with managed agents than your license allows. Further managed agent builds will be blocked.")
|
||||
} else if managedAgentCount >= softWarningThreshold {
|
||||
entitlements.Warnings = append(entitlements.Warnings,
|
||||
"You are approaching the managed agent limit in your license. Please refer to the Deployment Licenses page for more information.")
|
||||
}
|
||||
if agentLimit.Enabled && agentLimit.Limit != nil && managedAgentCount >= *agentLimit.Limit {
|
||||
entitlements.Warnings = append(entitlements.Warnings,
|
||||
codersdk.LicenseManagedAgentLimitExceededWarningText)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -683,11 +563,6 @@ var (
|
||||
|
||||
type Features map[codersdk.FeatureName]int64
|
||||
|
||||
type usageLimit struct {
|
||||
Soft *int64
|
||||
Hard *int64 // 0 means "disabled"
|
||||
}
|
||||
|
||||
// Claims is the full set of claims in a license.
|
||||
type Claims struct {
|
||||
jwt.RegisteredClaims
|
||||
|
||||
@@ -76,8 +76,7 @@ func TestEntitlements(t *testing.T) {
|
||||
f := make(license.Features)
|
||||
for _, name := range codersdk.FeatureNames {
|
||||
if name == codersdk.FeatureManagedAgentLimit {
|
||||
f[codersdk.FeatureName("managed_agent_limit_soft")] = 100
|
||||
f[codersdk.FeatureName("managed_agent_limit_hard")] = 200
|
||||
f[codersdk.FeatureManagedAgentLimit] = 100
|
||||
continue
|
||||
}
|
||||
f[name] = 1
|
||||
@@ -533,8 +532,7 @@ func TestEntitlements(t *testing.T) {
|
||||
t.Run("Premium", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
const userLimit = 1
|
||||
const expectedAgentSoftLimit = 1000
|
||||
const expectedAgentHardLimit = 1000
|
||||
const expectedAgentLimit = 1000
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
licenseOptions := coderdenttest.LicenseOptions{
|
||||
@@ -566,8 +564,7 @@ func TestEntitlements(t *testing.T) {
|
||||
agentEntitlement := entitlements.Features[featureName]
|
||||
require.True(t, agentEntitlement.Enabled)
|
||||
require.Equal(t, codersdk.EntitlementEntitled, agentEntitlement.Entitlement)
|
||||
require.EqualValues(t, expectedAgentSoftLimit, *agentEntitlement.SoftLimit)
|
||||
require.EqualValues(t, expectedAgentHardLimit, *agentEntitlement.Limit)
|
||||
require.EqualValues(t, expectedAgentLimit, *agentEntitlement.Limit)
|
||||
|
||||
// This might be shocking, but there's a sound reason for this.
|
||||
// See license.go for more details.
|
||||
@@ -840,7 +837,7 @@ func TestEntitlements(t *testing.T) {
|
||||
},
|
||||
}).
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 200)
|
||||
ManagedAgentLimit(100)
|
||||
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
@@ -882,16 +879,15 @@ func TestEntitlements(t *testing.T) {
|
||||
managedAgentLimit, ok := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.True(t, ok)
|
||||
|
||||
require.NotNil(t, managedAgentLimit.SoftLimit)
|
||||
require.EqualValues(t, 100, *managedAgentLimit.SoftLimit)
|
||||
require.NotNil(t, managedAgentLimit.Limit)
|
||||
require.EqualValues(t, 200, *managedAgentLimit.Limit)
|
||||
// The soft limit value (100) is used as the single Limit.
|
||||
require.EqualValues(t, 100, *managedAgentLimit.Limit)
|
||||
require.NotNil(t, managedAgentLimit.Actual)
|
||||
require.EqualValues(t, 175, *managedAgentLimit.Actual)
|
||||
|
||||
// Should've also populated a warning.
|
||||
// Usage exceeds the limit, so an exceeded warning should be present.
|
||||
require.Len(t, entitlements.Warnings, 1)
|
||||
require.Equal(t, "You are approaching the managed agent limit in your license. Please refer to the Deployment Licenses page for more information.", entitlements.Warnings[0])
|
||||
require.Equal(t, codersdk.LicenseManagedAgentLimitExceededWarningText, entitlements.Warnings[0])
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1121,13 +1117,12 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
{
|
||||
Name: "ManagedAgentLimit",
|
||||
Licenses: []*coderdenttest.LicenseOptions{
|
||||
enterpriseLicense().UserLimit(100).ManagedAgentLimit(100, 200),
|
||||
enterpriseLicense().UserLimit(100).ManagedAgentLimit(100),
|
||||
},
|
||||
Arguments: license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
// 175 will generate a warning as it's over 75% of the
|
||||
// difference between the soft and hard limit.
|
||||
return 174, nil
|
||||
// 74 is below the limit (soft=100), so no warning.
|
||||
return 74, nil
|
||||
},
|
||||
},
|
||||
AssertEntitlements: func(t *testing.T, entitlements codersdk.Entitlements) {
|
||||
@@ -1136,9 +1131,9 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
assert.True(t, feature.Enabled)
|
||||
assert.Equal(t, int64(100), *feature.SoftLimit)
|
||||
assert.Equal(t, int64(200), *feature.Limit)
|
||||
assert.Equal(t, int64(174), *feature.Actual)
|
||||
// Soft limit value is used as the single Limit.
|
||||
assert.Equal(t, int64(100), *feature.Limit)
|
||||
assert.Equal(t, int64(74), *feature.Actual)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1151,7 +1146,7 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
WithIssuedAt(time.Now().Add(-time.Hour * 2)),
|
||||
enterpriseLicense().
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 100).
|
||||
ManagedAgentLimit(100).
|
||||
WithIssuedAt(time.Now().Add(-time.Hour * 1)).
|
||||
GracePeriod(time.Now()),
|
||||
},
|
||||
@@ -1168,7 +1163,6 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementGracePeriod, feature.Entitlement)
|
||||
assert.True(t, feature.Enabled)
|
||||
assert.Equal(t, int64(100), *feature.SoftLimit)
|
||||
assert.Equal(t, int64(100), *feature.Limit)
|
||||
assert.Equal(t, int64(74), *feature.Actual)
|
||||
},
|
||||
@@ -1183,7 +1177,7 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
WithIssuedAt(time.Now().Add(-time.Hour * 2)),
|
||||
enterpriseLicense().
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 200).
|
||||
ManagedAgentLimit(100).
|
||||
WithIssuedAt(time.Now().Add(-time.Hour * 1)).
|
||||
Expired(time.Now()),
|
||||
},
|
||||
@@ -1196,84 +1190,33 @@ func TestLicenseEntitlements(t *testing.T) {
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementNotEntitled, feature.Entitlement)
|
||||
assert.False(t, feature.Enabled)
|
||||
assert.Nil(t, feature.SoftLimit)
|
||||
assert.Nil(t, feature.Limit)
|
||||
assert.Nil(t, feature.Actual)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ManagedAgentLimitWarning/ApproachingLimit/DifferentSoftAndHardLimit",
|
||||
Name: "ManagedAgentLimitWarning/ExceededLimit",
|
||||
Licenses: []*coderdenttest.LicenseOptions{
|
||||
enterpriseLicense().
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 200),
|
||||
ManagedAgentLimit(100),
|
||||
},
|
||||
Arguments: license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return 175, nil
|
||||
return 150, nil
|
||||
},
|
||||
},
|
||||
AssertEntitlements: func(t *testing.T, entitlements codersdk.Entitlements) {
|
||||
assert.Len(t, entitlements.Warnings, 1)
|
||||
assert.Equal(t, "You are approaching the managed agent limit in your license. Please refer to the Deployment Licenses page for more information.", entitlements.Warnings[0])
|
||||
assert.Equal(t, codersdk.LicenseManagedAgentLimitExceededWarningText, entitlements.Warnings[0])
|
||||
assertNoErrors(t, entitlements)
|
||||
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
assert.True(t, feature.Enabled)
|
||||
assert.Equal(t, int64(100), *feature.SoftLimit)
|
||||
assert.Equal(t, int64(200), *feature.Limit)
|
||||
assert.Equal(t, int64(175), *feature.Actual)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ManagedAgentLimitWarning/ApproachingLimit/EqualSoftAndHardLimit",
|
||||
Licenses: []*coderdenttest.LicenseOptions{
|
||||
enterpriseLicense().
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 100),
|
||||
},
|
||||
Arguments: license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return 75, nil
|
||||
},
|
||||
},
|
||||
AssertEntitlements: func(t *testing.T, entitlements codersdk.Entitlements) {
|
||||
assert.Len(t, entitlements.Warnings, 1)
|
||||
assert.Equal(t, "You are approaching the managed agent limit in your license. Please refer to the Deployment Licenses page for more information.", entitlements.Warnings[0])
|
||||
assertNoErrors(t, entitlements)
|
||||
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
assert.True(t, feature.Enabled)
|
||||
assert.Equal(t, int64(100), *feature.SoftLimit)
|
||||
// Soft limit (100) is used as the single Limit.
|
||||
assert.Equal(t, int64(100), *feature.Limit)
|
||||
assert.Equal(t, int64(75), *feature.Actual)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ManagedAgentLimitWarning/BreachedLimit",
|
||||
Licenses: []*coderdenttest.LicenseOptions{
|
||||
enterpriseLicense().
|
||||
UserLimit(100).
|
||||
ManagedAgentLimit(100, 200),
|
||||
},
|
||||
Arguments: license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return 200, nil
|
||||
},
|
||||
},
|
||||
AssertEntitlements: func(t *testing.T, entitlements codersdk.Entitlements) {
|
||||
assert.Len(t, entitlements.Warnings, 1)
|
||||
assert.Equal(t, "You have built more workspaces with managed agents than your license allows. Further managed agent builds will be blocked.", entitlements.Warnings[0])
|
||||
assertNoErrors(t, entitlements)
|
||||
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
assert.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
assert.True(t, feature.Enabled)
|
||||
assert.Equal(t, int64(100), *feature.SoftLimit)
|
||||
assert.Equal(t, int64(200), *feature.Limit)
|
||||
assert.Equal(t, int64(200), *feature.Actual)
|
||||
assert.Equal(t, int64(150), *feature.Actual)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1472,173 +1415,240 @@ func TestAIBridgeSoftWarning(t *testing.T) {
|
||||
func TestUsageLimitFeatures(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
sdkFeatureName codersdk.FeatureName
|
||||
softLimitFeatureName codersdk.FeatureName
|
||||
hardLimitFeatureName codersdk.FeatureName
|
||||
}{
|
||||
{
|
||||
sdkFeatureName: codersdk.FeatureManagedAgentLimit,
|
||||
softLimitFeatureName: codersdk.FeatureName("managed_agent_limit_soft"),
|
||||
hardLimitFeatureName: codersdk.FeatureName("managed_agent_limit_hard"),
|
||||
},
|
||||
}
|
||||
// Ensures that usage limit features are ranked by issued at, not by
|
||||
// values.
|
||||
t.Run("IssuedAtRanking", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(string(c.sdkFeatureName), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Generate 2 real licenses both with managed agent limit
|
||||
// features. lic2 should trump lic1 even though it has a lower
|
||||
// limit, because it was issued later.
|
||||
lic1 := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
IssuedAt: time.Now().Add(-time.Minute * 2),
|
||||
NotBefore: time.Now().Add(-time.Minute * 2),
|
||||
ExpiresAt: time.Now().Add(time.Hour * 2),
|
||||
Features: license.Features{
|
||||
codersdk.FeatureManagedAgentLimit: 100,
|
||||
},
|
||||
}),
|
||||
}
|
||||
lic2Iat := time.Now().Add(-time.Minute * 1)
|
||||
lic2Nbf := lic2Iat.Add(-time.Minute)
|
||||
lic2Exp := lic2Iat.Add(time.Hour)
|
||||
lic2 := database.License{
|
||||
ID: 2,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: lic2Exp,
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
IssuedAt: lic2Iat,
|
||||
NotBefore: lic2Nbf,
|
||||
ExpiresAt: lic2Exp,
|
||||
Features: license.Features{
|
||||
codersdk.FeatureManagedAgentLimit: 50,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
// Test for either a missing soft or hard limit feature value.
|
||||
t.Run("MissingGroupedFeature", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
const actualAgents = 10
|
||||
arguments := license.FeatureArguments{
|
||||
ActiveUserCount: 10,
|
||||
ReplicaCount: 0,
|
||||
ExternalAuthCount: 0,
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return actualAgents, nil
|
||||
},
|
||||
}
|
||||
|
||||
for _, feature := range []codersdk.FeatureName{
|
||||
c.softLimitFeatureName,
|
||||
c.hardLimitFeatureName,
|
||||
} {
|
||||
t.Run(string(feature), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Load the licenses in both orders to ensure the correct
|
||||
// behavior is observed no matter the order.
|
||||
for _, order := range [][]database.License{
|
||||
{lic1, lic2},
|
||||
{lic2, lic1},
|
||||
} {
|
||||
entitlements, err := license.LicensesEntitlements(context.Background(), time.Now(), order, map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments)
|
||||
require.NoError(t, err)
|
||||
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
feature: 100,
|
||||
},
|
||||
}),
|
||||
}
|
||||
feature, ok := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.True(t, ok, "feature %s not found", codersdk.FeatureManagedAgentLimit)
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 50, *feature.Limit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
require.WithinDuration(t, lic2Iat, feature.UsagePeriod.IssuedAt, 2*time.Second)
|
||||
require.WithinDuration(t, lic2Nbf, feature.UsagePeriod.Start, 2*time.Second)
|
||||
require.WithinDuration(t, lic2Exp, feature.UsagePeriod.End, 2*time.Second)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
arguments := license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return 0, nil
|
||||
},
|
||||
}
|
||||
entitlements, err := license.LicensesEntitlements(context.Background(), time.Now(), []database.License{lic}, map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments)
|
||||
require.NoError(t, err)
|
||||
// TestOldStyleManagedAgentLicenses ensures backward compatibility with
|
||||
// older licenses that encode the managed agent limit using separate
|
||||
// "managed_agent_limit_soft" and "managed_agent_limit_hard" feature keys
|
||||
// instead of the canonical "managed_agent_limit" key.
|
||||
func TestOldStyleManagedAgentLicenses(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
feature, ok := entitlements.Features[c.sdkFeatureName]
|
||||
require.True(t, ok, "feature %s not found", c.sdkFeatureName)
|
||||
require.Equal(t, codersdk.EntitlementNotEntitled, feature.Entitlement)
|
||||
t.Run("SoftAndHard", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Len(t, entitlements.Errors, 1)
|
||||
require.Equal(t, fmt.Sprintf("Invalid license (%v): feature %s has missing soft or hard limit values", lic.UUID, c.sdkFeatureName), entitlements.Errors[0])
|
||||
})
|
||||
}
|
||||
})
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureName("managed_agent_limit_soft"): 100,
|
||||
codersdk.FeatureName("managed_agent_limit_hard"): 200,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
t.Run("HardBelowSoft", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
const actualAgents = 42
|
||||
arguments := license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(_ context.Context, _, _ time.Time) (int64, error) {
|
||||
return actualAgents, nil
|
||||
},
|
||||
}
|
||||
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
c.softLimitFeatureName: 100,
|
||||
c.hardLimitFeatureName: 50,
|
||||
},
|
||||
}),
|
||||
}
|
||||
entitlements, err := license.LicensesEntitlements(
|
||||
context.Background(), time.Now(), []database.License{lic},
|
||||
map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, entitlements.Errors)
|
||||
|
||||
arguments := license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return 0, nil
|
||||
},
|
||||
}
|
||||
entitlements, err := license.LicensesEntitlements(context.Background(), time.Now(), []database.License{lic}, map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments)
|
||||
require.NoError(t, err)
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.True(t, feature.Enabled)
|
||||
require.NotNil(t, feature.Limit)
|
||||
// The soft limit should be used as the canonical limit.
|
||||
require.EqualValues(t, 100, *feature.Limit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
})
|
||||
|
||||
feature, ok := entitlements.Features[c.sdkFeatureName]
|
||||
require.True(t, ok, "feature %s not found", c.sdkFeatureName)
|
||||
require.Equal(t, codersdk.EntitlementNotEntitled, feature.Entitlement)
|
||||
t.Run("OnlySoft", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Len(t, entitlements.Errors, 1)
|
||||
require.Equal(t, fmt.Sprintf("Invalid license (%v): feature %s has a hard limit less than the soft limit", lic.UUID, c.sdkFeatureName), entitlements.Errors[0])
|
||||
})
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureName("managed_agent_limit_soft"): 75,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
// Ensures that these features are ranked by issued at, not by
|
||||
// values.
|
||||
t.Run("IssuedAtRanking", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
const actualAgents = 10
|
||||
arguments := license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(_ context.Context, _, _ time.Time) (int64, error) {
|
||||
return actualAgents, nil
|
||||
},
|
||||
}
|
||||
|
||||
// Generate 2 real licenses both with managed agent limit
|
||||
// features. lic2 should trump lic1 even though it has a lower
|
||||
// limit, because it was issued later.
|
||||
lic1 := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
IssuedAt: time.Now().Add(-time.Minute * 2),
|
||||
NotBefore: time.Now().Add(-time.Minute * 2),
|
||||
ExpiresAt: time.Now().Add(time.Hour * 2),
|
||||
Features: license.Features{
|
||||
c.softLimitFeatureName: 100,
|
||||
c.hardLimitFeatureName: 200,
|
||||
},
|
||||
}),
|
||||
}
|
||||
lic2Iat := time.Now().Add(-time.Minute * 1)
|
||||
lic2Nbf := lic2Iat.Add(-time.Minute)
|
||||
lic2Exp := lic2Iat.Add(time.Hour)
|
||||
lic2 := database.License{
|
||||
ID: 2,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: lic2Exp,
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
IssuedAt: lic2Iat,
|
||||
NotBefore: lic2Nbf,
|
||||
ExpiresAt: lic2Exp,
|
||||
Features: license.Features{
|
||||
c.softLimitFeatureName: 50,
|
||||
c.hardLimitFeatureName: 100,
|
||||
},
|
||||
}),
|
||||
}
|
||||
entitlements, err := license.LicensesEntitlements(
|
||||
context.Background(), time.Now(), []database.License{lic},
|
||||
map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, entitlements.Errors)
|
||||
|
||||
const actualAgents = 10
|
||||
arguments := license.FeatureArguments{
|
||||
ActiveUserCount: 10,
|
||||
ReplicaCount: 0,
|
||||
ExternalAuthCount: 0,
|
||||
ManagedAgentCountFn: func(ctx context.Context, from time.Time, to time.Time) (int64, error) {
|
||||
return actualAgents, nil
|
||||
},
|
||||
}
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.True(t, feature.Enabled)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 75, *feature.Limit)
|
||||
})
|
||||
|
||||
// Load the licenses in both orders to ensure the correct
|
||||
// behavior is observed no matter the order.
|
||||
for _, order := range [][]database.License{
|
||||
{lic1, lic2},
|
||||
{lic2, lic1},
|
||||
} {
|
||||
entitlements, err := license.LicensesEntitlements(context.Background(), time.Now(), order, map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments)
|
||||
require.NoError(t, err)
|
||||
// A license with only the hard limit key should silently ignore it,
|
||||
// leaving the feature unset (not entitled).
|
||||
t.Run("OnlyHard", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
feature, ok := entitlements.Features[c.sdkFeatureName]
|
||||
require.True(t, ok, "feature %s not found", c.sdkFeatureName)
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 100, *feature.Limit)
|
||||
require.NotNil(t, feature.SoftLimit)
|
||||
require.EqualValues(t, 50, *feature.SoftLimit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
require.WithinDuration(t, lic2Iat, feature.UsagePeriod.IssuedAt, 2*time.Second)
|
||||
require.WithinDuration(t, lic2Nbf, feature.UsagePeriod.Start, 2*time.Second)
|
||||
require.WithinDuration(t, lic2Exp, feature.UsagePeriod.End, 2*time.Second)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureName("managed_agent_limit_hard"): 200,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
arguments := license.FeatureArguments{
|
||||
ManagedAgentCountFn: func(_ context.Context, _, _ time.Time) (int64, error) {
|
||||
return 0, nil
|
||||
},
|
||||
}
|
||||
|
||||
entitlements, err := license.LicensesEntitlements(
|
||||
context.Background(), time.Now(), []database.License{lic},
|
||||
map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, entitlements.Errors)
|
||||
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.Equal(t, codersdk.EntitlementNotEntitled, feature.Entitlement)
|
||||
})
|
||||
|
||||
// Old-style license with both soft and hard set to zero should
|
||||
// explicitly disable the feature (and override any Premium default).
|
||||
t.Run("ExplicitZero", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
Exp: time.Now().Add(time.Hour),
|
||||
UUID: uuid.New(),
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
FeatureSet: codersdk.FeatureSetPremium,
|
||||
Features: license.Features{
|
||||
codersdk.FeatureUserLimit: 100,
|
||||
codersdk.FeatureName("managed_agent_limit_soft"): 0,
|
||||
codersdk.FeatureName("managed_agent_limit_hard"): 0,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
const actualAgents = 5
|
||||
arguments := license.FeatureArguments{
|
||||
ActiveUserCount: 10,
|
||||
ManagedAgentCountFn: func(_ context.Context, _, _ time.Time) (int64, error) {
|
||||
return actualAgents, nil
|
||||
},
|
||||
}
|
||||
|
||||
entitlements, err := license.LicensesEntitlements(
|
||||
context.Background(), time.Now(), []database.License{lic},
|
||||
map[codersdk.FeatureName]bool{}, coderdenttest.Keys, arguments,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
feature := entitlements.Features[codersdk.FeatureManagedAgentLimit]
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.False(t, feature.Enabled)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 0, *feature.Limit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
@@ -1676,20 +1686,16 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
require.True(t, ok, "feature %s not found", codersdk.FeatureManagedAgentLimit)
|
||||
require.Equal(t, codersdk.EntitlementNotEntitled, feature.Entitlement)
|
||||
require.Nil(t, feature.Limit)
|
||||
require.Nil(t, feature.SoftLimit)
|
||||
require.Nil(t, feature.Actual)
|
||||
require.Nil(t, feature.UsagePeriod)
|
||||
})
|
||||
|
||||
// "Premium" licenses should receive a default managed agent limit of:
|
||||
// soft = 1000
|
||||
// hard = 1000
|
||||
// "Premium" licenses should receive a default managed agent limit of 1000.
|
||||
t.Run("Premium", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const userLimit = 33
|
||||
const softLimit = 1000
|
||||
const hardLimit = 1000
|
||||
const defaultLimit = 1000
|
||||
lic := database.License{
|
||||
ID: 1,
|
||||
UploadedAt: time.Now(),
|
||||
@@ -1720,9 +1726,7 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
require.True(t, ok, "feature %s not found", codersdk.FeatureManagedAgentLimit)
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, hardLimit, *feature.Limit)
|
||||
require.NotNil(t, feature.SoftLimit)
|
||||
require.EqualValues(t, softLimit, *feature.SoftLimit)
|
||||
require.EqualValues(t, defaultLimit, *feature.Limit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
@@ -1731,8 +1735,8 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
require.NotZero(t, feature.UsagePeriod.End)
|
||||
})
|
||||
|
||||
// "Premium" licenses with an explicit managed agent limit should not
|
||||
// receive a default managed agent limit.
|
||||
// "Premium" licenses with an explicit managed agent limit should use
|
||||
// that value instead of the default.
|
||||
t.Run("PremiumExplicitValues", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -1744,9 +1748,8 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
FeatureSet: codersdk.FeatureSetPremium,
|
||||
Features: license.Features{
|
||||
codersdk.FeatureUserLimit: 100,
|
||||
codersdk.FeatureName("managed_agent_limit_soft"): 100,
|
||||
codersdk.FeatureName("managed_agent_limit_hard"): 200,
|
||||
codersdk.FeatureUserLimit: 100,
|
||||
codersdk.FeatureManagedAgentLimit: 100,
|
||||
},
|
||||
}),
|
||||
}
|
||||
@@ -1768,9 +1771,7 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
require.True(t, ok, "feature %s not found", codersdk.FeatureManagedAgentLimit)
|
||||
require.Equal(t, codersdk.EntitlementEntitled, feature.Entitlement)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 200, *feature.Limit)
|
||||
require.NotNil(t, feature.SoftLimit)
|
||||
require.EqualValues(t, 100, *feature.SoftLimit)
|
||||
require.EqualValues(t, 100, *feature.Limit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
@@ -1792,9 +1793,8 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
|
||||
FeatureSet: codersdk.FeatureSetPremium,
|
||||
Features: license.Features{
|
||||
codersdk.FeatureUserLimit: 100,
|
||||
codersdk.FeatureName("managed_agent_limit_soft"): 0,
|
||||
codersdk.FeatureName("managed_agent_limit_hard"): 0,
|
||||
codersdk.FeatureUserLimit: 100,
|
||||
codersdk.FeatureManagedAgentLimit: 0,
|
||||
},
|
||||
}),
|
||||
}
|
||||
@@ -1818,8 +1818,6 @@ func TestManagedAgentLimitDefault(t *testing.T) {
|
||||
require.False(t, feature.Enabled)
|
||||
require.NotNil(t, feature.Limit)
|
||||
require.EqualValues(t, 0, *feature.Limit)
|
||||
require.NotNil(t, feature.SoftLimit)
|
||||
require.EqualValues(t, 0, *feature.SoftLimit)
|
||||
require.NotNil(t, feature.Actual)
|
||||
require.EqualValues(t, actualAgents, *feature.Actual)
|
||||
require.NotNil(t, feature.UsagePeriod)
|
||||
|
||||
@@ -13,18 +13,15 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/prebuilds"
|
||||
)
|
||||
|
||||
type EnterpriseClaimer struct {
|
||||
store database.Store
|
||||
type EnterpriseClaimer struct{}
|
||||
|
||||
func NewEnterpriseClaimer() *EnterpriseClaimer {
|
||||
return &EnterpriseClaimer{}
|
||||
}
|
||||
|
||||
func NewEnterpriseClaimer(store database.Store) *EnterpriseClaimer {
|
||||
return &EnterpriseClaimer{
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (c EnterpriseClaimer) Claim(
|
||||
func (EnterpriseClaimer) Claim(
|
||||
ctx context.Context,
|
||||
store database.Store,
|
||||
now time.Time,
|
||||
userID uuid.UUID,
|
||||
name string,
|
||||
@@ -33,7 +30,7 @@ func (c EnterpriseClaimer) Claim(
|
||||
nextStartAt sql.NullTime,
|
||||
ttl sql.NullInt64,
|
||||
) (*uuid.UUID, error) {
|
||||
result, err := c.store.ClaimPrebuiltWorkspace(ctx, database.ClaimPrebuiltWorkspaceParams{
|
||||
result, err := store.ClaimPrebuiltWorkspace(ctx, database.ClaimPrebuiltWorkspaceParams{
|
||||
NewUserID: userID,
|
||||
NewName: name,
|
||||
Now: now,
|
||||
|
||||
@@ -175,7 +175,7 @@ func TestClaimPrebuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(spy)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, orgID, templateWithAgentAndPresetsWithPrebuilds(desiredInstances))
|
||||
|
||||
@@ -604,6 +604,25 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
// Load the mesh key directly from the database. We don't retrieve the mesh
|
||||
// key from the built-in DERP server because it may not be enabled.
|
||||
//
|
||||
// The mesh key is always generated at startup by an enterprise coderd
|
||||
// server.
|
||||
var meshKey string
|
||||
if req.DerpEnabled {
|
||||
var err error
|
||||
meshKey, err = api.Database.GetDERPMeshKey(ctx)
|
||||
if err != nil {
|
||||
httpapi.InternalServerError(rw, xerrors.Errorf("get DERP mesh key: %w", err))
|
||||
return
|
||||
}
|
||||
if meshKey == "" {
|
||||
httpapi.InternalServerError(rw, xerrors.New("mesh key is empty"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
startingRegionID, _ := getProxyDERPStartingRegionID(api.Options.BaseDERPMap)
|
||||
// #nosec G115 - Safe conversion as DERP region IDs are small integers expected to be within int32 range
|
||||
regionID := int32(startingRegionID) + proxy.RegionID
|
||||
@@ -710,7 +729,7 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusCreated, wsproxysdk.RegisterWorkspaceProxyResponse{
|
||||
DERPMeshKey: api.DERPServer.MeshKey(),
|
||||
DERPMeshKey: meshKey,
|
||||
DERPRegionID: regionID,
|
||||
DERPMap: api.AGPL.DERPMap(),
|
||||
DERPForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
|
||||
|
||||
@@ -2,12 +2,15 @@ package coderd_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -16,6 +19,7 @@ import (
|
||||
"github.com/sqlc-dev/pqtype"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
"cdr.dev/slog/v3/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
@@ -34,6 +38,7 @@ import (
|
||||
"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func TestRegions(t *testing.T) {
|
||||
@@ -278,10 +283,11 @@ func TestWorkspaceProxyCRUD(t *testing.T) {
|
||||
func TestProxyRegisterDeregister(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setup := func(t *testing.T) (*codersdk.Client, database.Store) {
|
||||
setupWithDeploymentValues := func(t *testing.T, dv *codersdk.DeploymentValues) (*codersdk.Client, database.Store) {
|
||||
db, pubsub := dbtestutil.NewDB(t)
|
||||
client, _ := coderdenttest.New(t, &coderdenttest.Options{
|
||||
Options: &coderdtest.Options{
|
||||
DeploymentValues: dv,
|
||||
Database: db,
|
||||
Pubsub: pubsub,
|
||||
IncludeProvisionerDaemon: true,
|
||||
@@ -297,6 +303,11 @@ func TestProxyRegisterDeregister(t *testing.T) {
|
||||
return client, db
|
||||
}
|
||||
|
||||
setup := func(t *testing.T) (*codersdk.Client, database.Store) {
|
||||
dv := coderdtest.DeploymentValues(t)
|
||||
return setupWithDeploymentValues(t, dv)
|
||||
}
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -363,7 +374,7 @@ func TestProxyRegisterDeregister(t *testing.T) {
|
||||
req = wsproxysdk.RegisterWorkspaceProxyRequest{
|
||||
AccessURL: "https://cool.proxy.coder.test",
|
||||
WildcardHostname: "*.cool.proxy.coder.test",
|
||||
DerpEnabled: false,
|
||||
DerpEnabled: true,
|
||||
ReplicaID: req.ReplicaID,
|
||||
ReplicaHostname: "venus",
|
||||
ReplicaError: "error",
|
||||
@@ -608,6 +619,99 @@ func TestProxyRegisterDeregister(t *testing.T) {
|
||||
require.True(t, ok, "expected to register replica %d", i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("RegisterWithDisabledBuiltInDERP/DerpEnabled", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a DERP map file. Currently, Coder refuses to start if there
|
||||
// are zero DERP regions.
|
||||
// TODO: ideally coder can start without any DERP servers if the
|
||||
// customer is going to be using DERPs via proxies. We could make it
|
||||
// a configuration value to allow an empty DERP map on startup or
|
||||
// something.
|
||||
tmpDir := t.TempDir()
|
||||
derpPath := filepath.Join(tmpDir, "derp.json")
|
||||
content, err := json.Marshal(&tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
Nodes: []*tailcfg.DERPNode{{}},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.WriteFile(derpPath, content, 0o600))
|
||||
|
||||
dv := coderdtest.DeploymentValues(t)
|
||||
dv.DERP.Server.Enable = false // disable built-in DERP server
|
||||
dv.DERP.Config.Path = serpent.String(derpPath)
|
||||
client, _ := setupWithDeploymentValues(t, dv)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
createRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
|
||||
Name: "proxy",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyClient := wsproxysdk.New(client.URL, createRes.ProxyToken)
|
||||
registerRes, err := proxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
|
||||
AccessURL: "https://proxy.coder.test",
|
||||
WildcardHostname: "*.proxy.coder.test",
|
||||
DerpEnabled: true,
|
||||
ReplicaID: uuid.New(),
|
||||
ReplicaHostname: "venus",
|
||||
ReplicaError: "",
|
||||
ReplicaRelayAddress: "http://127.0.0.1:8080",
|
||||
Version: buildinfo.Version(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Should still be able to retrieve the DERP mesh key from the database,
|
||||
// even though the built-in DERP server is disabled.
|
||||
require.Equal(t, registerRes.DERPMeshKey, coderdtest.DefaultDERPMeshKey)
|
||||
})
|
||||
|
||||
t.Run("RegisterWithDisabledBuiltInDERP/DerpEnabled", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Same as above.
|
||||
tmpDir := t.TempDir()
|
||||
derpPath := filepath.Join(tmpDir, "derp.json")
|
||||
content, err := json.Marshal(&tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
Nodes: []*tailcfg.DERPNode{{}},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.WriteFile(derpPath, content, 0o600))
|
||||
|
||||
dv := coderdtest.DeploymentValues(t)
|
||||
dv.DERP.Server.Enable = false // disable built-in DERP server
|
||||
dv.DERP.Config.Path = serpent.String(derpPath)
|
||||
client, _ := setupWithDeploymentValues(t, dv)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
createRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
|
||||
Name: "proxy",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyClient := wsproxysdk.New(client.URL, createRes.ProxyToken)
|
||||
registerRes, err := proxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
|
||||
AccessURL: "https://proxy.coder.test",
|
||||
WildcardHostname: "*.proxy.coder.test",
|
||||
DerpEnabled: false,
|
||||
ReplicaID: uuid.New(),
|
||||
ReplicaHostname: "venus",
|
||||
ReplicaError: "",
|
||||
ReplicaRelayAddress: "http://127.0.0.1:8080",
|
||||
Version: buildinfo.Version(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// The server shouldn't bother querying or returning the DERP mesh key
|
||||
// if the proxy's DERP server is disabled.
|
||||
require.Empty(t, registerRes.DERPMeshKey)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIssueSignedAppToken(t *testing.T) {
|
||||
|
||||
@@ -1989,7 +1989,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Setup user, template and template version with a preset with 1 prebuild instance
|
||||
@@ -2113,7 +2113,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Setup user, template and template version with a preset with 1 prebuild instance
|
||||
@@ -2237,7 +2237,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Setup user, template and template version with a preset with 1 prebuild instance
|
||||
@@ -2383,7 +2383,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Setup user, template and template version with a preset with 1 prebuild instance
|
||||
@@ -2530,7 +2530,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
// Setup user, template and template version with a preset with 1 prebuild instance
|
||||
@@ -2977,7 +2977,7 @@ func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
organizationName, err := client.Organization(ctx, owner.OrganizationID)
|
||||
@@ -4720,7 +4720,7 @@ func TestWorkspaceAITask(t *testing.T) {
|
||||
Features: license.Features{
|
||||
codersdk.FeatureTemplateRBAC: 1,
|
||||
},
|
||||
}).ManagedAgentLimit(10, 20),
|
||||
}).ManagedAgentLimit(10),
|
||||
})
|
||||
|
||||
client, _ := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID,
|
||||
|
||||
@@ -94,7 +94,7 @@
|
||||
# 3. Update the sha256 and run again
|
||||
# 4. Nix will fail with the correct vendorHash
|
||||
# 5. Update the vendorHash
|
||||
sqlc-custom = unstablePkgs.buildGo124Module {
|
||||
sqlc-custom = unstablePkgs.buildGo125Module {
|
||||
pname = "sqlc";
|
||||
version = "coder-fork-aab4e865a51df0c43e1839f81a9d349b41d14f05";
|
||||
|
||||
@@ -156,7 +156,7 @@
|
||||
gnused
|
||||
gnugrep
|
||||
gnutar
|
||||
unstablePkgs.go_1_24
|
||||
unstablePkgs.go_1_25
|
||||
gofumpt
|
||||
go-migrate
|
||||
(pinnedPkgs.golangci-lint)
|
||||
@@ -224,7 +224,7 @@
|
||||
# slim bundle into it's own derivation.
|
||||
buildFat =
|
||||
osArch:
|
||||
unstablePkgs.buildGo124Module {
|
||||
unstablePkgs.buildGo125Module {
|
||||
name = "coder-${osArch}";
|
||||
# Updated with ./scripts/update-flake.sh`.
|
||||
# This should be updated whenever go.mod changes!
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/coder/coder/v2
|
||||
|
||||
go 1.25.6
|
||||
go 1.25.7
|
||||
|
||||
// Required until a v3 of chroma is created to lazily initialize all XML files.
|
||||
// None of our dependencies seem to use the registries anyways, so this
|
||||
@@ -473,7 +473,7 @@ require (
|
||||
github.com/anthropics/anthropic-sdk-go v1.19.0
|
||||
github.com/brianvoe/gofakeit/v7 v7.14.0
|
||||
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225
|
||||
github.com/coder/aibridge v1.0.0
|
||||
github.com/coder/aibridge v1.0.9
|
||||
github.com/coder/aisdk-go v0.0.9
|
||||
github.com/coder/boundary v0.6.0
|
||||
github.com/coder/preview v1.0.4
|
||||
@@ -591,4 +591,9 @@ tool (
|
||||
storj.io/drpc/cmd/protoc-gen-go-drpc
|
||||
)
|
||||
|
||||
// Replace sdks with our own optimized forks until relevant upstream PRs are merged.
|
||||
// https://github.com/anthropics/anthropic-sdk-go/pull/262
|
||||
replace github.com/anthropics/anthropic-sdk-go v1.19.0 => github.com/dannykopping/anthropic-sdk-go v0.0.0-20251230111224-88a4315810bd
|
||||
|
||||
// https://github.com/openai/openai-go/pull/602
|
||||
replace github.com/openai/openai-go/v3 => github.com/SasSwart/openai-go/v3 v3.0.0-20260204134041-fb987b42a728
|
||||
|
||||