Compare commits
108 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 031e1c2d10 | |||
| ad4d5ed70c | |||
| 7b0461a99b | |||
| 75a41e6f8d | |||
| 4f9cbe6db0 | |||
| 3fc2ab38bf | |||
| 800922edec | |||
| 868b654439 | |||
| 07911dd7c7 | |||
| 577c3aee9b | |||
| ad5957c646 | |||
| f03be7da29 | |||
| 4c7d6403c8 | |||
| b14a709adb | |||
| 3d97f677e5 | |||
| 8985120c36 | |||
| c60f802580 | |||
| 37aecda165 | |||
| 14b4650d6c | |||
| b035843484 | |||
| 21eabb1d73 | |||
| 536bca7ea9 | |||
| e45635aab6 | |||
| 036ed5672f | |||
| 90cf4809ec | |||
| 4847920407 | |||
| a464ab67c6 | |||
| 0611e90dd3 | |||
| 5da28ff72f | |||
| f5d4926bc1 | |||
| 9f6ce7542a | |||
| d09300eadf | |||
| 9a417df940 | |||
| 8ee4f594d5 | |||
| 9eda6569b8 | |||
| bb7b49de6a | |||
| 5ae0e08494 | |||
| 04b0253e8a | |||
| 06e396188f | |||
| 62704eb858 | |||
| 1a94aa67a3 | |||
| 7473b57e54 | |||
| 57ab991a95 | |||
| 1b31279506 | |||
| 4f1fd82ed7 | |||
| 4ce4b5ef9f | |||
| dfbd541cee | |||
| 921fad098b | |||
| 264ae77458 | |||
| c2c225052a | |||
| e13f2a9869 | |||
| d06b21df45 | |||
| 327c885292 | |||
| 7a8d8d2f86 | |||
| 7090a1e205 | |||
| f358a6db11 | |||
| 2204731ddb | |||
| d7037280da | |||
| 799b190dee | |||
| 3eeeabfd68 | |||
| 7dfa33b410 | |||
| e008f720b6 | |||
| d4cd982608 | |||
| 3ee4f6d0ec | |||
| c352a51b22 | |||
| 2ee3386cc5 | |||
| 8f3bb0b0d1 | |||
| b1267c458c | |||
| a5c06a3751 | |||
| 7b44976618 | |||
| c3f41ce08c | |||
| 6f15b178a4 | |||
| 1375fd9ead | |||
| 7546e94534 | |||
| 59b2afaa80 | |||
| 303389e75a | |||
| 25d7f27cdb | |||
| f2e998848e | |||
| d2e54819bf | |||
| 806d7e4c11 | |||
| 7123518baa | |||
| bb186b8699 | |||
| bbca7f546c | |||
| 4bff2f7296 | |||
| c3cd3614e4 | |||
| 612aae2523 | |||
| 49f135bcd4 | |||
| f47f89d997 | |||
| 78bc5861e0 | |||
| 0d21365825 | |||
| 409360c62d | |||
| 6c8209bdf1 | |||
| ece531ab4e | |||
| 15c61906e2 | |||
| 8d6822b23a | |||
| 98834a7837 | |||
| 338b952d71 | |||
| 9b14fd3adc | |||
| b82693d4cc | |||
| f5858c8a18 | |||
| 9843adb8c6 | |||
| fa7baebdd8 | |||
| 3398833919 | |||
| 365ab0e609 | |||
| 7c948a7ad8 | |||
| e195856c43 | |||
| 6a81474ff0 | |||
| 6c49938fca |
@@ -1,4 +1,7 @@
|
||||
{
|
||||
"permissions": {
|
||||
"defaultMode": "bypassPermissions"
|
||||
},
|
||||
"hooks": {
|
||||
"PostToolUse": [
|
||||
{
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Start Docker service if not already running.
|
||||
sudo service docker start
|
||||
sudo service docker status >/dev/null 2>&1 || sudo service docker start
|
||||
|
||||
@@ -7,6 +7,6 @@ runs:
|
||||
- name: go install tools
|
||||
shell: bash
|
||||
run: |
|
||||
go install tool
|
||||
./.github/scripts/retry.sh -- go install tool
|
||||
# NOTE: protoc-gen-go cannot be installed with `go get`
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
./.github/scripts/retry.sh -- go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
|
||||
@@ -4,7 +4,7 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.24.11"
|
||||
default: "1.25.6"
|
||||
use-preinstalled-go:
|
||||
description: "Whether to use preinstalled Go."
|
||||
default: "false"
|
||||
@@ -22,14 +22,14 @@ runs:
|
||||
|
||||
- name: Install gotestsum
|
||||
shell: bash
|
||||
run: go install gotest.tools/gotestsum@0d9599e513d70e5792bb9334869f82f6e8b53d4d # main as of 2025-05-15
|
||||
run: ./.github/scripts/retry.sh -- go install gotest.tools/gotestsum@0d9599e513d70e5792bb9334869f82f6e8b53d4d # main as of 2025-05-15
|
||||
|
||||
- name: Install mtimehash
|
||||
shell: bash
|
||||
run: go install github.com/slsyy/mtimehash/cmd/mtimehash@a6b5da4ed2c4a40e7b805534b004e9fde7b53ce0 # v1.0.0
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/slsyy/mtimehash/cmd/mtimehash@a6b5da4ed2c4a40e7b805534b004e9fde7b53ce0 # v1.0.0
|
||||
|
||||
# It isn't necessary that we ever do this, but it helps
|
||||
# separate the "setup" from the "run" times.
|
||||
- name: go mod download
|
||||
shell: bash
|
||||
run: go mod download -x
|
||||
run: ./.github/scripts/retry.sh -- go mod download -x
|
||||
|
||||
@@ -14,4 +14,4 @@ runs:
|
||||
# - https://github.com/sqlc-dev/sqlc/pull/4159
|
||||
shell: bash
|
||||
run: |
|
||||
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
./.github/scripts/retry.sh -- env CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
|
||||
Executable
+50
@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env bash
|
||||
# Retry a command with exponential backoff.
|
||||
#
|
||||
# Usage: retry.sh [--max-attempts N] -- <command...>
|
||||
#
|
||||
# Example:
|
||||
# retry.sh --max-attempts 3 -- go install gotest.tools/gotestsum@latest
|
||||
#
|
||||
# This will retry the command up to 3 times with exponential backoff
|
||||
# (2s, 4s, 8s delays between attempts).
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# shellcheck source=scripts/lib.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
|
||||
|
||||
max_attempts=3
|
||||
|
||||
args="$(getopt -o "" -l max-attempts: -- "$@")"
|
||||
eval set -- "$args"
|
||||
while true; do
|
||||
case "$1" in
|
||||
--max-attempts)
|
||||
max_attempts="$2"
|
||||
shift 2
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
*)
|
||||
error "Unrecognized option: $1"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
error "Usage: retry.sh [--max-attempts N] -- <command...>"
|
||||
fi
|
||||
|
||||
attempt=1
|
||||
until "$@"; do
|
||||
if ((attempt >= max_attempts)); then
|
||||
error "Command failed after $max_attempts attempts: $*"
|
||||
fi
|
||||
delay=$((2 ** attempt))
|
||||
log "Attempt $attempt/$max_attempts failed, retrying in ${delay}s..."
|
||||
sleep "$delay"
|
||||
((attempt++))
|
||||
done
|
||||
+41
-29
@@ -40,7 +40,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
# runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
# steps:
|
||||
# - name: Checkout
|
||||
# uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
# uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
# with:
|
||||
# fetch-depth: 1
|
||||
# # See: https://github.com/stefanzweifel/git-auto-commit-action?tab=readme-ov-file#commits-made-by-this-action-do-not-trigger-new-workflow-runs
|
||||
@@ -162,7 +162,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -176,12 +176,12 @@ jobs:
|
||||
- name: Get golangci-lint cache dir
|
||||
run: |
|
||||
linter_ver=$(grep -Eo 'GOLANGCI_LINT_VERSION=\S+' dogfood/coder/Dockerfile | cut -d '=' -f 2)
|
||||
go install "github.com/golangci/golangci-lint/cmd/golangci-lint@v$linter_ver"
|
||||
./.github/scripts/retry.sh -- go install "github.com/golangci/golangci-lint/cmd/golangci-lint@v$linter_ver"
|
||||
dir=$(golangci-lint cache status | awk '/Dir/ { print $2 }')
|
||||
echo "LINT_CACHE_DIR=$dir" >> "$GITHUB_ENV"
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
|
||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
@@ -256,7 +256,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -313,7 +313,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -329,7 +329,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install shfmt
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
run: ./.github/scripts/retry.sh -- go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
|
||||
- name: make fmt
|
||||
timeout-minutes: 7
|
||||
@@ -386,7 +386,7 @@ jobs:
|
||||
uses: coder/setup-ramdisk-action@e1100847ab2d7bcd9d14bcda8f2d1b0f07b36f1b # v0.1.0
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -395,6 +395,18 @@ jobs:
|
||||
id: go-paths
|
||||
uses: ./.github/actions/setup-go-paths
|
||||
|
||||
# macOS default bash and coreutils are too old for our scripts
|
||||
# (lib.sh requires bash 4+, GNU getopt, make 4+).
|
||||
- name: Setup GNU tools (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
@@ -559,7 +571,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -621,7 +633,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -693,7 +705,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -720,7 +732,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -753,7 +765,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
@@ -833,7 +845,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
# 👇 Ensures Chromatic can read your full git history
|
||||
fetch-depth: 0
|
||||
@@ -849,7 +861,7 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@4c20b95e9d3209ecfdf9cd6aace6bbde71ba1694 # v13.3.4
|
||||
uses: chromaui/action@07791f8243f4cb2698bf4d00426baf4b2d1cb7e0 # v13.3.5
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -881,7 +893,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@4c20b95e9d3209ecfdf9cd6aace6bbde71ba1694 # v13.3.4
|
||||
uses: chromaui/action@07791f8243f4cb2698bf4d00426baf4b2d1cb7e0 # v13.3.5
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -914,7 +926,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
# 0 is required here for version.sh to work.
|
||||
fetch-depth: 0
|
||||
@@ -1018,7 +1030,7 @@ jobs:
|
||||
steps:
|
||||
# Harden Runner doesn't work on macOS
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -1068,7 +1080,7 @@ jobs:
|
||||
- name: Build dylibs
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
./.github/scripts/retry.sh -- go mod download
|
||||
|
||||
make gen/mark-fresh
|
||||
make build/coder-dylib
|
||||
@@ -1105,7 +1117,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -1117,10 +1129,10 @@ jobs:
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install go-winres
|
||||
run: go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
@@ -1128,7 +1140,7 @@ jobs:
|
||||
- name: Build
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
./.github/scripts/retry.sh -- go mod download
|
||||
make gen/mark-fresh
|
||||
make build
|
||||
|
||||
@@ -1160,7 +1172,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -1207,10 +1219,10 @@ jobs:
|
||||
java-version: "11.0"
|
||||
|
||||
- name: Install go-winres
|
||||
run: go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
@@ -1258,7 +1270,7 @@ jobs:
|
||||
- name: Build
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
./.github/scripts/retry.sh -- go mod download
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
tag="main-${version//+/-}"
|
||||
@@ -1557,7 +1569,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
@@ -215,7 +215,7 @@ jobs:
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout create-task-action
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
|
||||
@@ -249,7 +249,7 @@ jobs:
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout create-task-action
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
|
||||
@@ -41,7 +41,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -151,7 +151,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
@@ -6,7 +6,12 @@
|
||||
# - New PR opened: Initial documentation review
|
||||
# - PR updated (synchronize): Re-review after changes
|
||||
# - Label "doc-check" added: Manual trigger for review
|
||||
# - PR marked ready for review: Review when draft is promoted
|
||||
# - Workflow dispatch: Manual run with PR URL
|
||||
#
|
||||
# Note: This workflow requires access to secrets and will be skipped for:
|
||||
# - Any PR where secrets are not available
|
||||
# For these PRs, maintainers can manually trigger via workflow_dispatch.
|
||||
|
||||
name: AI Documentation Check
|
||||
|
||||
@@ -16,6 +21,7 @@ on:
|
||||
- opened
|
||||
- synchronize
|
||||
- labeled
|
||||
- ready_for_review
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_url:
|
||||
@@ -32,13 +38,14 @@ jobs:
|
||||
doc-check:
|
||||
name: Analyze PR for Documentation Updates Needed
|
||||
runs-on: ubuntu-latest
|
||||
# Run on: opened, synchronize, labeled (with doc-check label), or workflow_dispatch
|
||||
# Run on: opened, synchronize, labeled (with doc-check label), ready_for_review, or workflow_dispatch
|
||||
# Skip draft PRs unless manually triggered
|
||||
if: |
|
||||
(
|
||||
github.event.action == 'opened' ||
|
||||
github.event.action == 'synchronize' ||
|
||||
github.event.label.name == 'doc-check' ||
|
||||
github.event.action == 'ready_for_review' ||
|
||||
github.event_name == 'workflow_dispatch'
|
||||
) &&
|
||||
(github.event.pull_request.draft == false || github.event_name == 'workflow_dispatch')
|
||||
@@ -52,13 +59,36 @@ jobs:
|
||||
actions: write
|
||||
|
||||
steps:
|
||||
- name: Check if secrets are available
|
||||
id: check-secrets
|
||||
env:
|
||||
CODER_URL: ${{ secrets.DOC_CHECK_CODER_URL }}
|
||||
CODER_TOKEN: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
|
||||
run: |
|
||||
if [[ -z "${CODER_URL}" || -z "${CODER_TOKEN}" ]]; then
|
||||
echo "skip=true" >> "${GITHUB_OUTPUT}"
|
||||
echo "Secrets not available - skipping doc-check."
|
||||
echo "This is expected for PRs where secrets are not available."
|
||||
echo "Maintainers can manually trigger via workflow_dispatch if needed."
|
||||
{
|
||||
echo "⚠️ Workflow skipped: Secrets not available"
|
||||
echo ""
|
||||
echo "This workflow requires secrets that are unavailable for this run."
|
||||
echo "Maintainers can manually trigger via workflow_dispatch if needed."
|
||||
} >> "${GITHUB_STEP_SUMMARY}"
|
||||
else
|
||||
echo "skip=false" >> "${GITHUB_OUTPUT}"
|
||||
fi
|
||||
|
||||
- name: Setup Coder CLI
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
uses: coder/setup-action@4a607a8113d4e676e2d7c34caa20a814bc88bfda # v1
|
||||
with:
|
||||
access_url: ${{ secrets.DOC_CHECK_CODER_URL }}
|
||||
coder_session_token: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
|
||||
|
||||
- name: Determine PR Context
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
id: determine-context
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
@@ -105,6 +135,9 @@ jobs:
|
||||
labeled)
|
||||
echo "trigger_type=label_requested" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
ready_for_review)
|
||||
echo "trigger_type=ready_for_review" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
*)
|
||||
echo "trigger_type=unknown" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
@@ -116,6 +149,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Build task prompt
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
id: extract-context
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.determine-context.outputs.pr_number }}
|
||||
@@ -134,6 +168,9 @@ jobs:
|
||||
label_requested)
|
||||
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough documentation review."
|
||||
;;
|
||||
ready_for_review)
|
||||
CONTEXT="This PR was marked READY FOR REVIEW (converted from draft). Perform a thorough documentation review."
|
||||
;;
|
||||
manual)
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough documentation review."
|
||||
;;
|
||||
@@ -149,6 +186,8 @@ jobs:
|
||||
|
||||
Use \`gh\` to get PR details, diff, and all comments. Check for previous doc-check comments (from coder-doc-check) and only post a new comment if it adds value.
|
||||
|
||||
**Do not comment if no documentation changes are needed.**
|
||||
|
||||
## Comment format
|
||||
|
||||
Use this structure (only include relevant sections):
|
||||
@@ -165,9 +204,6 @@ jobs:
|
||||
### New Documentation Needed
|
||||
- [ ] \`docs/suggested/path.md\` - [what should be documented]
|
||||
|
||||
### No Changes Needed
|
||||
[brief explanation - use this OR the above sections, not both]
|
||||
|
||||
---
|
||||
*Automated review via [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
|
||||
\`\`\`"
|
||||
@@ -180,7 +216,8 @@ jobs:
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout create-task-action
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
@@ -189,22 +226,24 @@ jobs:
|
||||
repository: coder/create-task-action
|
||||
|
||||
- name: Create Coder Task for Documentation Check
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
id: create_task
|
||||
uses: ./.github/actions/create-task-action
|
||||
with:
|
||||
coder-url: ${{ secrets.DOC_CHECK_CODER_URL }}
|
||||
coder-token: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
|
||||
coder-organization: "default"
|
||||
coder-template-name: doc-check-bot
|
||||
coder-template-name: coder-workflow-bot
|
||||
coder-template-preset: ${{ steps.determine-context.outputs.template_preset }}
|
||||
coder-task-name-prefix: doc-check
|
||||
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
|
||||
coder-username: doc-check-bot
|
||||
github-token: ${{ github.token }}
|
||||
github-issue-url: ${{ steps.determine-context.outputs.pr_url }}
|
||||
comment-on-issue: true
|
||||
comment-on-issue: false
|
||||
|
||||
- name: Write Task Info
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
env:
|
||||
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
@@ -222,6 +261,7 @@ jobs:
|
||||
} >> "${GITHUB_STEP_SUMMARY}"
|
||||
|
||||
- name: Wait for Task Completion
|
||||
if: steps.check-secrets.outputs.skip != 'true'
|
||||
id: wait_task
|
||||
env:
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
@@ -311,7 +351,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Fetch Task Logs
|
||||
if: always()
|
||||
if: always() && steps.check-secrets.outputs.skip != 'true'
|
||||
env:
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
run: |
|
||||
@@ -324,7 +364,7 @@ jobs:
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Cleanup Task
|
||||
if: always()
|
||||
if: always() && steps.check-secrets.outputs.skip != 'true'
|
||||
env:
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
run: |
|
||||
@@ -336,7 +376,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Write Final Summary
|
||||
if: always()
|
||||
if: always() && steps.check-secrets.outputs.skip != 'true'
|
||||
env:
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
TASK_MESSAGE: ${{ steps.wait_task.outputs.task_message }}
|
||||
|
||||
@@ -43,7 +43,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.5"
|
||||
|
||||
- uses: nix-community/cache-nix-action@b426b118b6dc86d6952988d396aa7c6b09776d08 # v7.0.0
|
||||
- uses: nix-community/cache-nix-action@106bba72ed8e29c8357661199511ef07790175e9 # v7.0.1
|
||||
with:
|
||||
# restore and save a cache using this key
|
||||
primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
||||
@@ -130,7 +130,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ jobs:
|
||||
uses: coder/setup-ramdisk-action@e1100847ab2d7bcd9d14bcda8f2d1b0f07b36f1b # v0.1.0
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -233,7 +233,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -337,7 +337,7 @@ jobs:
|
||||
kubectl create namespace "pr${PR_NUMBER}"
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
steps:
|
||||
# Harden Runner doesn't work on macOS.
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -121,7 +121,7 @@ jobs:
|
||||
- name: Build dylibs
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
./.github/scripts/retry.sh -- go mod download
|
||||
|
||||
make gen/mark-fresh
|
||||
make build/coder-dylib
|
||||
@@ -169,7 +169,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -259,7 +259,7 @@ jobs:
|
||||
java-version: "11.0"
|
||||
|
||||
- name: Install go-winres
|
||||
run: go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
run: ./.github/scripts/retry.sh -- go install github.com/tc-hib/go-winres@d743268d7ea168077ddd443c4240562d4f5e8c3e # v0.3.3
|
||||
|
||||
- name: Install nsis and zstd
|
||||
run: sudo apt-get install -y nsis zstd
|
||||
@@ -341,7 +341,7 @@ jobs:
|
||||
- name: Build binaries
|
||||
run: |
|
||||
set -euo pipefail
|
||||
go mod download
|
||||
./.github/scripts/retry.sh -- go mod download
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
make gen/mark-fresh
|
||||
@@ -888,7 +888,7 @@ jobs:
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -976,7 +976,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
@@ -97,11 +97,11 @@ jobs:
|
||||
- name: Install yq
|
||||
run: go run github.com/mikefarah/yq/v4@v4.44.3
|
||||
- name: Install mockgen
|
||||
run: go install go.uber.org/mock/mockgen@v0.5.0
|
||||
run: ./.github/scripts/retry.sh -- go install go.uber.org/mock/mockgen@v0.6.0
|
||||
- name: Install protoc-gen-go
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
run: ./.github/scripts/retry.sh -- go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
- name: Install protoc-gen-go-drpc
|
||||
run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.34
|
||||
run: ./.github/scripts/retry.sh -- go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.34
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
# protoc must be in lockstep with our dogfood Dockerfile or the
|
||||
|
||||
@@ -101,7 +101,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Run delete-old-branches-action
|
||||
|
||||
@@ -153,7 +153,7 @@ jobs:
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
+10
-6
@@ -882,12 +882,16 @@ const (
|
||||
)
|
||||
|
||||
func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_Type, ip string) (disconnected func(code int, reason string)) {
|
||||
// Remove the port from the IP because ports are not supported in coderd.
|
||||
if host, _, err := net.SplitHostPort(ip); err != nil {
|
||||
a.logger.Error(a.hardCtx, "split host and port for connection report failed", slog.F("ip", ip), slog.Error(err))
|
||||
} else {
|
||||
// Best effort.
|
||||
ip = host
|
||||
// A blank IP can unfortunately happen if the connection is broken in a data race before we get to introspect it. We
|
||||
// still report it, and the recipient can handle a blank IP.
|
||||
if ip != "" {
|
||||
// Remove the port from the IP because ports are not supported in coderd.
|
||||
if host, _, err := net.SplitHostPort(ip); err != nil {
|
||||
a.logger.Error(a.hardCtx, "split host and port for connection report failed", slog.F("ip", ip), slog.Error(err))
|
||||
} else {
|
||||
// Best effort.
|
||||
ip = host
|
||||
}
|
||||
}
|
||||
|
||||
// If the IP is "localhost" (which it can be in some cases), set it to
|
||||
|
||||
+55
-9
@@ -121,7 +121,8 @@ func TestAgent_ImmediateClose(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// NOTE: These tests only work when your default shell is bash for some reason.
|
||||
// NOTE(Cian): I noticed that these tests would fail when my default shell was zsh.
|
||||
// Writing "exit 0" to stdin before closing fixed the issue for me.
|
||||
|
||||
func TestAgent_Stats_SSH(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -148,16 +149,37 @@ func TestAgent_Stats_SSH(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
var s *proto.Stats
|
||||
// We are looking for four different stats to be reported. They might not all
|
||||
// arrive at the same time, so we loop until we've seen them all.
|
||||
var connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountSSHSeen bool
|
||||
require.Eventuallyf(t, func() bool {
|
||||
var ok bool
|
||||
s, ok = <-stats
|
||||
return ok && s.ConnectionCount > 0 && s.RxBytes > 0 && s.TxBytes > 0 && s.SessionCountSsh == 1
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if s.ConnectionCount > 0 {
|
||||
connectionCountSeen = true
|
||||
}
|
||||
if s.RxBytes > 0 {
|
||||
rxBytesSeen = true
|
||||
}
|
||||
if s.TxBytes > 0 {
|
||||
txBytesSeen = true
|
||||
}
|
||||
if s.SessionCountSsh == 1 {
|
||||
sessionCountSSHSeen = true
|
||||
}
|
||||
return connectionCountSeen && rxBytesSeen && txBytesSeen && sessionCountSSHSeen
|
||||
}, testutil.WaitLong, testutil.IntervalFast,
|
||||
"never saw stats: %+v", s,
|
||||
"never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountSsh: %t",
|
||||
s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountSSHSeen,
|
||||
)
|
||||
_, err = stdin.Write([]byte("exit 0\n"))
|
||||
require.NoError(t, err, "writing exit to stdin")
|
||||
_ = stdin.Close()
|
||||
err = session.Wait()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "waiting for session to exit")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -183,12 +205,31 @@ func TestAgent_Stats_ReconnectingPTY(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
var s *proto.Stats
|
||||
// We are looking for four different stats to be reported. They might not all
|
||||
// arrive at the same time, so we loop until we've seen them all.
|
||||
var connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountReconnectingPTYSeen bool
|
||||
require.Eventuallyf(t, func() bool {
|
||||
var ok bool
|
||||
s, ok = <-stats
|
||||
return ok && s.ConnectionCount > 0 && s.RxBytes > 0 && s.TxBytes > 0 && s.SessionCountReconnectingPty == 1
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if s.ConnectionCount > 0 {
|
||||
connectionCountSeen = true
|
||||
}
|
||||
if s.RxBytes > 0 {
|
||||
rxBytesSeen = true
|
||||
}
|
||||
if s.TxBytes > 0 {
|
||||
txBytesSeen = true
|
||||
}
|
||||
if s.SessionCountReconnectingPty == 1 {
|
||||
sessionCountReconnectingPTYSeen = true
|
||||
}
|
||||
return connectionCountSeen && rxBytesSeen && txBytesSeen && sessionCountReconnectingPTYSeen
|
||||
}, testutil.WaitLong, testutil.IntervalFast,
|
||||
"never saw stats: %+v", s,
|
||||
"never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountReconnectingPTY: %t",
|
||||
s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountReconnectingPTYSeen,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -218,9 +259,10 @@ func TestAgent_Stats_Magic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, strings.TrimSpace(string(output)))
|
||||
})
|
||||
|
||||
t.Run("TracksVSCode", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "window" {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Sleeping for infinity doesn't work on Windows")
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
@@ -252,7 +294,9 @@ func TestAgent_Stats_Magic(t *testing.T) {
|
||||
}, testutil.WaitLong, testutil.IntervalFast,
|
||||
"never saw stats",
|
||||
)
|
||||
// The shell will automatically exit if there is no stdin!
|
||||
|
||||
_, err = stdin.Write([]byte("exit 0\n"))
|
||||
require.NoError(t, err, "writing exit to stdin")
|
||||
_ = stdin.Close()
|
||||
err = session.Wait()
|
||||
require.NoError(t, err)
|
||||
@@ -3633,9 +3677,11 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
_, err = stdin.Write([]byte("exit 0\n"))
|
||||
require.NoError(t, err, "writing exit to stdin")
|
||||
_ = stdin.Close()
|
||||
err = session.Wait()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "waiting for session to exit")
|
||||
}
|
||||
|
||||
// echoOnce accepts a single connection, reads 4 bytes and echos them back
|
||||
|
||||
@@ -779,10 +779,13 @@ func (api *API) watchContainers(rw http.ResponseWriter, r *http.Request) {
|
||||
// close frames.
|
||||
_ = conn.CloseRead(context.Background())
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText)
|
||||
defer wsNetConn.Close()
|
||||
|
||||
go httpapi.Heartbeat(ctx, conn)
|
||||
go httpapi.HeartbeatClose(ctx, api.logger, cancel, conn)
|
||||
|
||||
updateCh := make(chan struct{}, 1)
|
||||
|
||||
|
||||
@@ -79,10 +79,12 @@ func TestBoundaryLogs_EndToEnd(t *testing.T) {
|
||||
logger := slog.Make(sink)
|
||||
workspaceID := uuid.New()
|
||||
templateID := uuid.New()
|
||||
templateVersionID := uuid.New()
|
||||
reporter := &agentapi.BoundaryLogsAPI{
|
||||
Log: logger,
|
||||
WorkspaceID: workspaceID,
|
||||
TemplateID: templateID,
|
||||
Log: logger,
|
||||
WorkspaceID: workspaceID,
|
||||
TemplateID: templateID,
|
||||
TemplateVersionID: templateVersionID,
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -126,6 +128,7 @@ func TestBoundaryLogs_EndToEnd(t *testing.T) {
|
||||
require.Equal(t, "allow", getField(entry.Fields, "decision"))
|
||||
require.Equal(t, workspaceID.String(), getField(entry.Fields, "workspace_id"))
|
||||
require.Equal(t, templateID.String(), getField(entry.Fields, "template_id"))
|
||||
require.Equal(t, templateVersionID.String(), getField(entry.Fields, "template_version_id"))
|
||||
require.Equal(t, "GET", getField(entry.Fields, "http_method"))
|
||||
require.Equal(t, "https://example.com/allowed", getField(entry.Fields, "http_url"))
|
||||
require.Equal(t, "*.example.com", getField(entry.Fields, "matched_rule"))
|
||||
@@ -159,6 +162,7 @@ func TestBoundaryLogs_EndToEnd(t *testing.T) {
|
||||
require.Equal(t, "deny", getField(entry.Fields, "decision"))
|
||||
require.Equal(t, workspaceID.String(), getField(entry.Fields, "workspace_id"))
|
||||
require.Equal(t, templateID.String(), getField(entry.Fields, "template_id"))
|
||||
require.Equal(t, templateVersionID.String(), getField(entry.Fields, "template_version_id"))
|
||||
require.Equal(t, "POST", getField(entry.Fields, "http_method"))
|
||||
require.Equal(t, "https://blocked.com/denied", getField(entry.Fields, "http_url"))
|
||||
require.Equal(t, nil, getField(entry.Fields, "matched_rule"))
|
||||
|
||||
@@ -81,6 +81,10 @@ type BackedPipe struct {
|
||||
// Unified error handling with generation filtering
|
||||
errChan chan ErrorEvent
|
||||
|
||||
// forceReconnectHook is a test hook invoked after ForceReconnect registers
|
||||
// with the singleflight group.
|
||||
forceReconnectHook func()
|
||||
|
||||
// singleflight group to dedupe concurrent ForceReconnect calls
|
||||
sf singleflight.Group
|
||||
|
||||
@@ -324,6 +328,13 @@ func (bp *BackedPipe) handleConnectionError(errorEvt ErrorEvent) {
|
||||
}
|
||||
}
|
||||
|
||||
// SetForceReconnectHookForTests sets a hook invoked after ForceReconnect
|
||||
// registers with the singleflight group. It must be set before any
|
||||
// concurrent ForceReconnect calls.
|
||||
func (bp *BackedPipe) SetForceReconnectHookForTests(hook func()) {
|
||||
bp.forceReconnectHook = hook
|
||||
}
|
||||
|
||||
// ForceReconnect forces a reconnection attempt immediately.
|
||||
// This can be used to force a reconnection if a new connection is established.
|
||||
// It prevents duplicate reconnections when called concurrently.
|
||||
@@ -331,7 +342,7 @@ func (bp *BackedPipe) ForceReconnect() error {
|
||||
// Deduplicate concurrent ForceReconnect calls so only one reconnection
|
||||
// attempt runs at a time from this API. Use the pipe's internal context
|
||||
// to ensure Close() cancels any in-flight attempt.
|
||||
_, err, _ := bp.sf.Do("force-reconnect", func() (interface{}, error) {
|
||||
resultChan := bp.sf.DoChan("force-reconnect", func() (interface{}, error) {
|
||||
bp.mu.Lock()
|
||||
defer bp.mu.Unlock()
|
||||
|
||||
@@ -346,5 +357,11 @@ func (bp *BackedPipe) ForceReconnect() error {
|
||||
|
||||
return nil, bp.reconnectLocked()
|
||||
})
|
||||
return err
|
||||
|
||||
if hook := bp.forceReconnectHook; hook != nil {
|
||||
hook()
|
||||
}
|
||||
|
||||
result := <-resultChan
|
||||
return result.Err
|
||||
}
|
||||
|
||||
@@ -742,12 +742,15 @@ func TestBackedPipe_DuplicateReconnectionPrevention(t *testing.T) {
|
||||
|
||||
const numConcurrent = 3
|
||||
startSignals := make([]chan struct{}, numConcurrent)
|
||||
startedSignals := make([]chan struct{}, numConcurrent)
|
||||
for i := range startSignals {
|
||||
startSignals[i] = make(chan struct{})
|
||||
startedSignals[i] = make(chan struct{})
|
||||
}
|
||||
|
||||
enteredSignals := make(chan struct{}, numConcurrent)
|
||||
bp.SetForceReconnectHookForTests(func() {
|
||||
enteredSignals <- struct{}{}
|
||||
})
|
||||
|
||||
errors := make([]error, numConcurrent)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
@@ -758,15 +761,12 @@ func TestBackedPipe_DuplicateReconnectionPrevention(t *testing.T) {
|
||||
defer wg.Done()
|
||||
// Wait for the signal to start
|
||||
<-startSignals[idx]
|
||||
// Signal that we're about to call ForceReconnect
|
||||
close(startedSignals[idx])
|
||||
errors[idx] = bp.ForceReconnect()
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Start the first ForceReconnect and wait for it to block
|
||||
close(startSignals[0])
|
||||
<-startedSignals[0]
|
||||
|
||||
// Wait for the first reconnect to actually start and block
|
||||
testutil.RequireReceive(testCtx, t, blockedChan)
|
||||
@@ -777,9 +777,9 @@ func TestBackedPipe_DuplicateReconnectionPrevention(t *testing.T) {
|
||||
close(startSignals[i])
|
||||
}
|
||||
|
||||
// Wait for all additional goroutines to have started their calls
|
||||
for i := 1; i < numConcurrent; i++ {
|
||||
<-startedSignals[i]
|
||||
// Wait for all ForceReconnect calls to join the singleflight operation.
|
||||
for i := 0; i < numConcurrent; i++ {
|
||||
testutil.RequireReceive(testCtx, t, enteredSignals)
|
||||
}
|
||||
|
||||
// At this point, one reconnect has started and is blocked,
|
||||
|
||||
@@ -7,6 +7,6 @@ func IsInitProcess() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func ForkReap(_ ...Option) error {
|
||||
return nil
|
||||
func ForkReap(_ ...Option) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
@@ -32,12 +32,13 @@ func TestReap(t *testing.T) {
|
||||
}
|
||||
|
||||
pids := make(reap.PidCh, 1)
|
||||
err := reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
// Provide some argument that immediately exits.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", "exit 0"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, exitCode)
|
||||
|
||||
cmd := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd.Start()
|
||||
@@ -65,6 +66,36 @@ func TestReap(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest
|
||||
func TestForkReapExitCodes(t *testing.T) {
|
||||
if testutil.InCI() {
|
||||
t.Skip("Detected CI, skipping reaper tests")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
command string
|
||||
expectedCode int
|
||||
}{
|
||||
{"exit 0", "exit 0", 0},
|
||||
{"exit 1", "exit 1", 1},
|
||||
{"exit 42", "exit 42", 42},
|
||||
{"exit 255", "exit 255", 255},
|
||||
{"SIGKILL", "kill -9 $$", 128 + 9},
|
||||
{"SIGTERM", "kill -15 $$", 128 + 15},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs("/bin/sh", "-c", tt.command),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedCode, exitCode, "exit code mismatch for %q", tt.command)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest // Signal handling.
|
||||
func TestReapInterrupt(t *testing.T) {
|
||||
// Don't run the reaper test in CI. It does weird
|
||||
@@ -84,13 +115,17 @@ func TestReapInterrupt(t *testing.T) {
|
||||
defer signal.Stop(usrSig)
|
||||
|
||||
go func() {
|
||||
errC <- reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
reaper.WithCatchSignals(os.Interrupt),
|
||||
// Signal propagation does not extend to children of children, so
|
||||
// we create a little bash script to ensure sleep is interrupted.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", fmt.Sprintf("pid=0; trap 'kill -USR2 %d; kill -TERM $pid' INT; sleep 10 &\npid=$!; kill -USR1 %d; wait", os.Getpid(), os.Getpid())),
|
||||
)
|
||||
// The child exits with 128 + SIGTERM (15) = 143, but the trap catches
|
||||
// SIGINT and sends SIGTERM to the sleep process, so exit code varies.
|
||||
_ = exitCode
|
||||
errC <- err
|
||||
}()
|
||||
|
||||
require.Equal(t, <-usrSig, syscall.SIGUSR1)
|
||||
|
||||
@@ -40,7 +40,10 @@ func catchSignals(pid int, sigs []os.Signal) {
|
||||
// the reaper and an exec.Command waiting for its process to complete.
|
||||
// The provided 'pids' channel may be nil if the caller does not care about the
|
||||
// reaped children PIDs.
|
||||
func ForkReap(opt ...Option) error {
|
||||
//
|
||||
// Returns the child's exit code (using 128+signal for signal termination)
|
||||
// and any error from Wait4.
|
||||
func ForkReap(opt ...Option) (int, error) {
|
||||
opts := &options{
|
||||
ExecArgs: os.Args,
|
||||
}
|
||||
@@ -53,7 +56,7 @@ func ForkReap(opt ...Option) error {
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get wd: %w", err)
|
||||
return 1, xerrors.Errorf("get wd: %w", err)
|
||||
}
|
||||
|
||||
pattrs := &syscall.ProcAttr{
|
||||
@@ -72,7 +75,7 @@ func ForkReap(opt ...Option) error {
|
||||
//#nosec G204
|
||||
pid, err := syscall.ForkExec(opts.ExecArgs[0], opts.ExecArgs, pattrs)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fork exec: %w", err)
|
||||
return 1, xerrors.Errorf("fork exec: %w", err)
|
||||
}
|
||||
|
||||
go catchSignals(pid, opts.CatchSignals)
|
||||
@@ -82,5 +85,18 @@ func ForkReap(opt ...Option) error {
|
||||
for xerrors.Is(err, syscall.EINTR) {
|
||||
_, err = syscall.Wait4(pid, &wstatus, 0, nil)
|
||||
}
|
||||
return err
|
||||
|
||||
// Convert wait status to exit code using standard Unix conventions:
|
||||
// - Normal exit: use the exit code
|
||||
// - Signal termination: use 128 + signal number
|
||||
var exitCode int
|
||||
switch {
|
||||
case wstatus.Exited():
|
||||
exitCode = wstatus.ExitStatus()
|
||||
case wstatus.Signaled():
|
||||
exitCode = 128 + int(wstatus.Signal())
|
||||
default:
|
||||
exitCode = 1
|
||||
}
|
||||
return exitCode, err
|
||||
}
|
||||
|
||||
+3
-3
@@ -136,7 +136,7 @@ func workspaceAgent() *serpent.Command {
|
||||
// to do this else we fork bomb ourselves.
|
||||
//nolint:gocritic
|
||||
args := append(os.Args, "--no-reap")
|
||||
err := reaper.ForkReap(
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs(args...),
|
||||
reaper.WithCatchSignals(StopSignals...),
|
||||
)
|
||||
@@ -145,8 +145,8 @@ func workspaceAgent() *serpent.Command {
|
||||
return xerrors.Errorf("fork reap: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(ctx, "reaper process exiting")
|
||||
return nil
|
||||
logger.Info(ctx, "reaper child process exited", slog.F("exit_code", exitCode))
|
||||
return ExitError(exitCode, nil)
|
||||
}
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
|
||||
@@ -491,6 +491,11 @@ func (m multiSelectModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
|
||||
case tea.KeySpace:
|
||||
options := m.filteredOptions()
|
||||
|
||||
if m.enableCustomInput && m.cursor == len(options) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if len(options) != 0 {
|
||||
options[m.cursor].chosen = !options[m.cursor].chosen
|
||||
}
|
||||
|
||||
+15
-49
@@ -46,7 +46,6 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
autoUpdates string
|
||||
copyParametersFrom string
|
||||
useParameterDefaults bool
|
||||
nonInteractive bool
|
||||
// Organization context is only required if more than 1 template
|
||||
// shares the same name across multiple organizations.
|
||||
orgContext = NewOrganizationContext()
|
||||
@@ -76,9 +75,6 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
}
|
||||
|
||||
if workspaceName == "" {
|
||||
if nonInteractive {
|
||||
return xerrors.New("workspace name must be provided as an argument in non-interactive mode")
|
||||
}
|
||||
workspaceName, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: "Specify a name for your workspace:",
|
||||
Validate: func(workspaceName string) error {
|
||||
@@ -126,25 +122,13 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
var templateVersionID uuid.UUID
|
||||
switch {
|
||||
case templateName == "":
|
||||
_, _ = fmt.Fprintln(inv.Stdout, pretty.Sprint(cliui.DefaultStyles.Wrap, "Select a template below to preview the provisioned infrastructure:"))
|
||||
|
||||
templates, err := client.Templates(inv.Context(), codersdk.TemplateFilter{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(templates) == 0 {
|
||||
return xerrors.New("no templates available")
|
||||
}
|
||||
|
||||
if nonInteractive {
|
||||
if len(templates) == 1 {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Using the only available template: %q\n", templates[0].Name)
|
||||
template = templates[0]
|
||||
templateVersionID = template.ActiveVersionID
|
||||
break
|
||||
}
|
||||
return xerrors.New("multiple templates available; use --template to specify which to use")
|
||||
}
|
||||
|
||||
slices.SortFunc(templates, func(a, b codersdk.Template) int {
|
||||
return slice.Descending(a.ActiveUserCount, b.ActiveUserCount)
|
||||
})
|
||||
@@ -183,8 +167,6 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
templateByName[templateName] = template
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stdout, pretty.Sprint(cliui.DefaultStyles.Wrap, "Select a template below to preview the provisioned infrastructure:"))
|
||||
|
||||
// Move the cursor up a single line for nicer display!
|
||||
option, err := cliui.Select(inv, cliui.SelectOptions{
|
||||
Options: templateNames,
|
||||
@@ -315,24 +297,19 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
if !errors.Is(err, ErrNoPresetFound) {
|
||||
return xerrors.Errorf("unable to resolve preset: %w", err)
|
||||
}
|
||||
// If no preset found, prompt the user to choose a preset, unless in
|
||||
// non-interactive mode, in which case no preset is used.
|
||||
if !nonInteractive {
|
||||
if preset, err = promptPresetSelection(inv, tvPresets); err != nil {
|
||||
return xerrors.Errorf("unable to prompt user for preset: %w", err)
|
||||
}
|
||||
// If no preset found, prompt the user to choose a preset
|
||||
if preset, err = promptPresetSelection(inv, tvPresets); err != nil {
|
||||
return xerrors.Errorf("unable to prompt user for preset: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if preset == nil {
|
||||
// Inform the user when no preset will be applied.
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "%s\n", cliui.Bold("No preset applied."))
|
||||
} else {
|
||||
// Convert preset parameters into workspace build parameters
|
||||
presetParameters = presetParameterAsWorkspaceBuildParameters(preset.Parameters)
|
||||
// Inform the user which preset was applied and its parameters
|
||||
displayAppliedPreset(inv, preset, presetParameters)
|
||||
} else {
|
||||
// Inform the user that no preset was applied
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "%s\n", cliui.Bold("No preset applied."))
|
||||
}
|
||||
|
||||
if opts.BeforeCreate != nil {
|
||||
@@ -355,20 +332,17 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
SourceWorkspaceParameters: sourceWorkspaceParameters,
|
||||
|
||||
UseParameterDefaults: useParameterDefaults,
|
||||
NonInteractive: nonInteractive,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("prepare build: %w", err)
|
||||
}
|
||||
|
||||
if !nonInteractive {
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: "Confirm create?",
|
||||
IsConfirm: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: "Confirm create?",
|
||||
IsConfirm: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ttlMillis *int64
|
||||
@@ -470,12 +444,6 @@ func (r *RootCmd) Create(opts CreateOptions) *serpent.Command {
|
||||
Description: "Automatically accept parameter defaults when no value is provided.",
|
||||
Value: serpent.BoolOf(&useParameterDefaults),
|
||||
},
|
||||
serpent.Option{
|
||||
Flag: "non-interactive",
|
||||
Env: "CODER_NON_INTERACTIVE",
|
||||
Description: "Automatically accept all defaults and error when there is no default for a required input.",
|
||||
Value: serpent.BoolOf(&nonInteractive),
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
)
|
||||
cmd.Options = append(cmd.Options, parameterFlags.cliParameters()...)
|
||||
@@ -502,7 +470,6 @@ type prepWorkspaceBuildArgs struct {
|
||||
RichParameterDefaults []codersdk.WorkspaceBuildParameter
|
||||
|
||||
UseParameterDefaults bool
|
||||
NonInteractive bool
|
||||
}
|
||||
|
||||
// resolvePreset returns the preset matching the given presetName (if specified),
|
||||
@@ -606,8 +573,7 @@ func prepWorkspaceBuild(inv *serpent.Invocation, client *codersdk.Client, args p
|
||||
WithRichParameters(args.RichParameters).
|
||||
WithRichParametersFile(parameterFile).
|
||||
WithRichParametersDefaults(args.RichParameterDefaults).
|
||||
WithUseParameterDefaults(args.UseParameterDefaults).
|
||||
WithNonInteractive(args.NonInteractive)
|
||||
WithUseParameterDefaults(args.UseParameterDefaults)
|
||||
buildParameters, err := resolver.Resolve(inv, args.Action, templateVersionParameters)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
+14
-230
@@ -139,12 +139,15 @@ func TestCreate(t *testing.T) {
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, completeWithAgent())
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, completeWithAgent(), func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.Name = "v1"
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Create a new version
|
||||
version2 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, completeWithAgent(), func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.Name = "v2"
|
||||
ctvr.TemplateID = template.ID
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version2.ID)
|
||||
@@ -297,117 +300,6 @@ func TestCreate(t *testing.T) {
|
||||
assert.Nil(t, ws.AutostartSchedule, "expected workspace autostart schedule to be nil")
|
||||
}
|
||||
})
|
||||
|
||||
tests := []struct {
|
||||
// name is the name of the test.
|
||||
name string
|
||||
// setup runs before the command is started and returns arguments that
|
||||
// will be appended to the create command.
|
||||
setup func(client *codersdk.Client, owner codersdk.CreateFirstUserResponse) []string
|
||||
// handlePty optionally runs after the command is started. It should handle
|
||||
// all expected prompts from the pty.
|
||||
handlePty func(ctx context.Context, pty *ptytest.PTY)
|
||||
// errors contains expected errors. The workspace will not be verified if
|
||||
// errors are expected.
|
||||
errors []string
|
||||
}{
|
||||
{
|
||||
name: "NoWorkspaceNameNonInteractive",
|
||||
setup: func(_ *codersdk.Client, _ codersdk.CreateFirstUserResponse) []string {
|
||||
return []string{"--non-interactive"}
|
||||
},
|
||||
errors: []string{
|
||||
"workspace name must be provided",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "OneTemplateNonInteractive",
|
||||
setup: func(_ *codersdk.Client, _ codersdk.CreateFirstUserResponse) []string {
|
||||
return []string{"my-workspace", "--non-interactive"}
|
||||
},
|
||||
handlePty: func(ctx context.Context, pty *ptytest.PTY) {
|
||||
pty.ExpectMatchContext(ctx, "Using the only available template")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleTemplatesNonInteractive",
|
||||
setup: func(client *codersdk.Client, owner codersdk.CreateFirstUserResponse) []string {
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
return []string{"my-workspace", "--non-interactive"}
|
||||
},
|
||||
errors: []string{
|
||||
"multiple templates available; use --template to specify which to use",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleTemplatesInteractive",
|
||||
setup: func(client *codersdk.Client, owner codersdk.CreateFirstUserResponse) []string {
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
return []string{"my-workspace", "--yes"}
|
||||
},
|
||||
handlePty: func(ctx context.Context, pty *ptytest.PTY) {
|
||||
pty.ExpectMatchContext(ctx, "Select a template below")
|
||||
pty.WriteLine("") // Select whatever is first.
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Set up the template.
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Run the command, after running any additional test setup.
|
||||
args := []string{"create"}
|
||||
if tt.setup != nil {
|
||||
args = append(args, tt.setup(client, owner)...)
|
||||
}
|
||||
inv, root := clitest.New(t, args...)
|
||||
clitest.SetupConfig(t, member, root)
|
||||
doneChan := make(chan error)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
doneChan <- inv.Run()
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// The test may do something with the pty.
|
||||
if tt.handlePty != nil {
|
||||
tt.handlePty(ctx, pty)
|
||||
}
|
||||
|
||||
// Wait for the command to exit.
|
||||
err := <-doneChan
|
||||
|
||||
if len(tt.errors) > 0 {
|
||||
require.Error(t, err)
|
||||
for _, errstr := range tt.errors {
|
||||
require.ErrorContains(t, err, errstr)
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the workspace was created.
|
||||
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{Name: "my-workspace"})
|
||||
require.NoError(t, err, "expected to find created workspace")
|
||||
require.Len(t, workspaces.Workspaces, 1)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func prepareEchoResponses(parameters []*proto.RichParameter, presets ...*proto.Preset) *echo.Responses {
|
||||
@@ -430,11 +322,10 @@ func prepareEchoResponses(parameters []*proto.RichParameter, presets ...*proto.P
|
||||
}
|
||||
|
||||
type param struct {
|
||||
name string
|
||||
ptype string
|
||||
value string
|
||||
mutable bool
|
||||
required bool
|
||||
name string
|
||||
ptype string
|
||||
value string
|
||||
mutable bool
|
||||
}
|
||||
|
||||
func TestCreateWithRichParameters(t *testing.T) {
|
||||
@@ -481,7 +372,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
// setup runs before the command is started and returns arguments that will
|
||||
// setup runs before the command is started and return arguments that will
|
||||
// be appended to the create command.
|
||||
setup func() []string
|
||||
// handlePty optionally runs after the command is started. It should handle
|
||||
@@ -628,6 +519,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
version2 := coderdtest.CreateTemplateVersion(t, tctx.client, tctx.owner.OrganizationID, prepareEchoResponses([]*proto.RichParameter{
|
||||
{Name: "another_parameter", Type: "string", DefaultValue: "not-relevant"},
|
||||
}), func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.Name = "v2"
|
||||
ctvr.TemplateID = tctx.template.ID
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, tctx.client, version2.ID)
|
||||
@@ -650,7 +542,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
// Simply accept the defaults.
|
||||
for _, param := range params {
|
||||
pty.ExpectMatch(param.name)
|
||||
pty.ExpectMatch(fmt.Sprintf("Enter a value (default: %q)", param.value))
|
||||
pty.ExpectMatch(`Enter a value (default: "` + param.value + `")`)
|
||||
pty.WriteLine("")
|
||||
}
|
||||
// Confirm the creation.
|
||||
@@ -667,7 +559,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
handlePty: func(pty *ptytest.PTY) {
|
||||
// Default values should get printed.
|
||||
for _, param := range params {
|
||||
pty.ExpectMatch(fmt.Sprintf("%q: '%s'", param.name, param.value))
|
||||
pty.ExpectMatch(fmt.Sprintf("%s: '%s'", param.name, param.value))
|
||||
}
|
||||
// No prompts, we only need to confirm.
|
||||
pty.ExpectMatch("Confirm create?")
|
||||
@@ -675,19 +567,6 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
},
|
||||
withDefaults: true,
|
||||
},
|
||||
{
|
||||
name: "ValuesFromTemplateDefaultsNonInteractive",
|
||||
setup: func() []string {
|
||||
return []string{"--non-interactive"}
|
||||
},
|
||||
handlePty: func(pty *ptytest.PTY) {
|
||||
// Default values should get printed.
|
||||
for _, param := range params {
|
||||
pty.ExpectMatch(fmt.Sprintf("%q: '%s'", param.name, param.value))
|
||||
}
|
||||
},
|
||||
withDefaults: true,
|
||||
},
|
||||
{
|
||||
name: "ValuesFromDefaultFlagsNoPrompt",
|
||||
setup: func() []string {
|
||||
@@ -701,45 +580,13 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
handlePty: func(pty *ptytest.PTY) {
|
||||
// Default values should get printed.
|
||||
for _, param := range params {
|
||||
pty.ExpectMatch(fmt.Sprintf("%q: '%s'", param.name, param.value))
|
||||
pty.ExpectMatch(fmt.Sprintf("%s: '%s'", param.name, param.value))
|
||||
}
|
||||
// No prompts, we only need to confirm.
|
||||
pty.ExpectMatch("Confirm create?")
|
||||
pty.WriteLine("yes")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ValuesFromDefaultFlagsNonInteractive",
|
||||
setup: func() []string {
|
||||
// Provide the defaults on the command line.
|
||||
args := []string{"--non-interactive"}
|
||||
for _, param := range params {
|
||||
args = append(args, "--parameter-default", fmt.Sprintf("%s=%s", param.name, param.value))
|
||||
}
|
||||
return args
|
||||
},
|
||||
handlePty: func(pty *ptytest.PTY) {
|
||||
// Default values should get printed.
|
||||
for _, param := range params {
|
||||
pty.ExpectMatch(fmt.Sprintf("%q: '%s'", param.name, param.value))
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ValuesMissingNonInteractive",
|
||||
setup: func() []string {
|
||||
return []string{"--non-interactive"}
|
||||
},
|
||||
inputParameters: []param{
|
||||
{
|
||||
name: "required_param",
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
errors: []string{
|
||||
"parameter \"required_param\" is required and has no default value",
|
||||
},
|
||||
},
|
||||
{
|
||||
// File and flags should override template defaults. Additionally, if a
|
||||
// value has no default value we should still get a prompt for it.
|
||||
@@ -828,7 +675,6 @@ cli_param: from file`)
|
||||
Name: param.name,
|
||||
Type: param.ptype,
|
||||
Mutable: param.mutable,
|
||||
Required: param.required,
|
||||
DefaultValue: defaultValue,
|
||||
Order: int32(i), //nolint:gosec
|
||||
})
|
||||
@@ -879,7 +725,7 @@ cli_param: from file`)
|
||||
if len(tt.errors) > 0 {
|
||||
require.Error(t, err)
|
||||
for _, errstr := range tt.errors {
|
||||
require.ErrorContains(t, err, errstr)
|
||||
assert.ErrorContains(t, err, errstr)
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
@@ -1185,68 +1031,6 @@ func TestCreateWithPreset(t *testing.T) {
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: thirdParameterName, Value: thirdParameterValue})
|
||||
})
|
||||
|
||||
t.Run("NoDefaultPresetNonInteractive", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
// Given: a template and a template version with a preset but no default.
|
||||
preset := proto.Preset{
|
||||
Name: "preset-test",
|
||||
Description: "Preset Test.",
|
||||
Parameters: []*proto.PresetParameter{
|
||||
{Name: firstParameterName, Value: secondOptionalParameterValue},
|
||||
{Name: thirdParameterName, Value: thirdParameterValue},
|
||||
},
|
||||
}
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, echoResponses(&preset))
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// When: running the create command without specifying a preset
|
||||
workspaceName := "my-workspace"
|
||||
inv, root := clitest.New(t, "create", workspaceName, "--template", template.Name,
|
||||
"--parameter", fmt.Sprintf("%s=%s", firstParameterName, firstOptionalParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", thirdParameterName, thirdParameterValue),
|
||||
"--non-interactive")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Should not prompt the user for the preset.
|
||||
pty.ExpectMatchContext(ctx, "No preset applied")
|
||||
|
||||
<-doneChan
|
||||
|
||||
tvPresets, err := client.TemplateVersionPresets(ctx, version.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, tvPresets, 1)
|
||||
|
||||
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{Name: workspaceName})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, workspaces.Workspaces, 1)
|
||||
|
||||
// Should: create a workspace using the expected template version and no preset
|
||||
workspaceLatestBuild := workspaces.Workspaces[0].LatestBuild
|
||||
require.Equal(t, version.ID, workspaceLatestBuild.TemplateVersionID)
|
||||
require.Nil(t, workspaceLatestBuild.TemplateVersionPresetID)
|
||||
buildParameters, err := client.WorkspaceBuildParameters(ctx, workspaceLatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buildParameters, 2)
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: firstParameterName, Value: firstOptionalParameterValue})
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: thirdParameterName, Value: thirdParameterValue})
|
||||
})
|
||||
|
||||
// This test verifies that when a template version has no presets,
|
||||
// the CLI does not prompt the user to select a preset and proceeds
|
||||
// with workspace creation without applying any preset.
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
boundarycli "github.com/coder/boundary/cli"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (*RootCmd) boundary() *serpent.Command {
|
||||
cmd := boundarycli.BaseCommand() // Package coder/boundary/cli exports a "base command" designed to be integrated as a subcommand.
|
||||
cmd.Use += " [args...]" // The base command looks like `boundary -- command`. Serpent adds the flags piece, but we need to add the args.
|
||||
return cmd
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
boundarycli "github.com/coder/boundary/cli"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// Actually testing the functionality of coder/boundary takes place in the
|
||||
// coder/boundary repo, since it's a dependency of coder.
|
||||
// Here we want to test basically that integrating it as a subcommand doesn't break anything.
|
||||
func TestBoundarySubcommand(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
inv, _ := clitest.New(t, "exp", "boundary", "--help")
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
go func() {
|
||||
err := inv.WithContext(ctx).Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Expect the --help output to include the short description.
|
||||
// We're simply confirming that `coder boundary --help` ran without a runtime error as
|
||||
// a good chunk of serpents self validation logic happens at runtime.
|
||||
pty.ExpectMatch(boundarycli.BaseCommand().Short)
|
||||
}
|
||||
@@ -174,6 +174,19 @@ func (RootCmd) promptExample() *serpent.Command {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "%q are nice choices.\n", strings.Join(multiSelectValues, ", "))
|
||||
return multiSelectError
|
||||
}, useThingsOption, enableCustomInputOption),
|
||||
promptCmd("multi-select-no-defaults", func(inv *serpent.Invocation) error {
|
||||
if len(multiSelectValues) == 0 {
|
||||
multiSelectValues, multiSelectError = cliui.MultiSelect(inv, cliui.MultiSelectOptions{
|
||||
Message: "Select some things:",
|
||||
Options: []string{
|
||||
"Code", "Chairs", "Whale",
|
||||
},
|
||||
EnableCustomInput: enableCustomInput,
|
||||
})
|
||||
}
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "%q are nice choices.\n", strings.Join(multiSelectValues, ", "))
|
||||
return multiSelectError
|
||||
}, useThingsOption, enableCustomInputOption),
|
||||
promptCmd("rich-multi-select", func(inv *serpent.Invocation) error {
|
||||
if len(multiSelectValues) == 0 {
|
||||
multiSelectValues, multiSelectError = cliui.MultiSelect(inv, cliui.MultiSelectOptions{
|
||||
|
||||
+9
-3
@@ -141,7 +141,9 @@ func TestGitSSH(t *testing.T) {
|
||||
"-o", "IdentitiesOnly=yes",
|
||||
"127.0.0.1",
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
// This occasionally times out at 15s on Windows CI runners. Use a
|
||||
// longer timeout to reduce flakes.
|
||||
ctx := testutil.Context(t, testutil.WaitSuperLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, inc)
|
||||
@@ -205,7 +207,9 @@ func TestGitSSH(t *testing.T) {
|
||||
inv, _ := clitest.New(t, cmdArgs...)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
// This occasionally times out at 15s on Windows CI runners. Use a
|
||||
// longer timeout to reduce flakes.
|
||||
ctx := testutil.Context(t, testutil.WaitSuperLong)
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
select {
|
||||
@@ -223,7 +227,9 @@ func TestGitSSH(t *testing.T) {
|
||||
inv, _ = clitest.New(t, cmdArgs...)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
ctx = testutil.Context(t, testutil.WaitMedium) // Reset context for second cmd test.
|
||||
// This occasionally times out at 15s on Windows CI runners. Use a
|
||||
// longer timeout to reduce flakes.
|
||||
ctx = testutil.Context(t, testutil.WaitSuperLong) // Reset context for second cmd test.
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
select {
|
||||
|
||||
@@ -462,9 +462,38 @@ func (r *RootCmd) login() *serpent.Command {
|
||||
Value: serpent.BoolOf(&useTokenForSession),
|
||||
},
|
||||
}
|
||||
cmd.Children = []*serpent.Command{
|
||||
r.loginToken(),
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) loginToken() *serpent.Command {
|
||||
return &serpent.Command{
|
||||
Use: "token",
|
||||
Short: "Print the current session token",
|
||||
Long: "Print the session token for use in scripts and automation.",
|
||||
Middleware: serpent.RequireNArgs(0),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
tok, err := r.ensureTokenBackend().Read(r.clientURL)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, os.ErrNotExist) {
|
||||
return xerrors.New("no session token found - run 'coder login' first")
|
||||
}
|
||||
if xerrors.Is(err, sessionstore.ErrNotImplemented) {
|
||||
return errKeyringNotSupported
|
||||
}
|
||||
return xerrors.Errorf("read session token: %w", err)
|
||||
}
|
||||
if tok == "" {
|
||||
return xerrors.New("no session token found - run 'coder login' first")
|
||||
}
|
||||
_, err = fmt.Fprintln(inv.Stdout, tok)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// isWSL determines if coder-cli is running within Windows Subsystem for Linux
|
||||
func isWSL() (bool, error) {
|
||||
if runtime.GOOS == goosDarwin || runtime.GOOS == goosWindows {
|
||||
|
||||
@@ -537,3 +537,31 @@ func TestLogin(t *testing.T) {
|
||||
require.Equal(t, selected, first.OrganizationID.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoginToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("PrintsToken", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, "login", "token", "--url", client.URL.String())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
pty.ExpectMatch(client.SessionToken())
|
||||
})
|
||||
|
||||
t.Run("NoTokenStored", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
inv, _ := clitest.New(t, "login", "token")
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "no session token found")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -35,7 +35,6 @@ type ParameterResolver struct {
|
||||
promptRichParameters bool
|
||||
promptEphemeralParameters bool
|
||||
useParameterDefaults bool
|
||||
nonInteractive bool
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) WithLastBuildParameters(params []codersdk.WorkspaceBuildParameter) *ParameterResolver {
|
||||
@@ -93,11 +92,6 @@ func (pr *ParameterResolver) WithUseParameterDefaults(useParameterDefaults bool)
|
||||
return pr
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) WithNonInteractive(nonInteractive bool) *ParameterResolver {
|
||||
pr.nonInteractive = nonInteractive
|
||||
return pr
|
||||
}
|
||||
|
||||
// Resolve gathers workspace build parameters in a layered fashion, applying
|
||||
// values from various sources in order of precedence:
|
||||
// 1. template defaults (if auto-accepting defaults)
|
||||
@@ -292,16 +286,10 @@ func (pr *ParameterResolver) resolveWithInput(resolved []codersdk.WorkspaceBuild
|
||||
parameterValue = v
|
||||
}
|
||||
|
||||
switch {
|
||||
// Auto-accept the default if there is one.
|
||||
case (pr.nonInteractive || pr.useParameterDefaults) && parameterValue != "":
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Using default value for %q: '%s'\n", tvp.Name, parameterValue)
|
||||
case pr.nonInteractive && tvp.Required:
|
||||
return nil, xerrors.Errorf("parameter %q is required and has no default value", tvp.Name)
|
||||
case pr.nonInteractive:
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Skipping optional parameter %q\n", tvp.Name)
|
||||
continue
|
||||
default:
|
||||
if pr.useParameterDefaults && parameterValue != "" {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Using default value for %s: '%s'\n", name, parameterValue)
|
||||
} else {
|
||||
var err error
|
||||
parameterValue, err = cliui.RichParameter(inv, tvp, name, parameterValue)
|
||||
if err != nil {
|
||||
|
||||
+6
-1
@@ -151,7 +151,6 @@ func (r *RootCmd) AGPLExperimental() []*serpent.Command {
|
||||
r.promptExample(),
|
||||
r.rptyCommand(),
|
||||
r.syncCommand(),
|
||||
r.boundary(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,6 +332,12 @@ func (r *RootCmd) Command(subcommands []*serpent.Command) (*serpent.Command, err
|
||||
// support links.
|
||||
return
|
||||
}
|
||||
if cmd.Name() == "boundary" {
|
||||
// The boundary command is integrated from the boundary package
|
||||
// and has YAML-only options (e.g., allowlist from config file)
|
||||
// that don't have flags or env vars.
|
||||
return
|
||||
}
|
||||
merr = errors.Join(
|
||||
merr,
|
||||
xerrors.Errorf("option %q in %q should have a flag or env", opt.Name, cmd.FullName()),
|
||||
|
||||
+12
-2
@@ -1126,7 +1126,7 @@ func TestSSH(t *testing.T) {
|
||||
t.Run("StdioExitOnParentDeath", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
|
||||
defer cancel()
|
||||
|
||||
// sleepStart -> agentReady -> sessionStarted -> sleepKill -> sleepDone -> cmdDone
|
||||
@@ -1188,7 +1188,17 @@ func TestSSH(t *testing.T) {
|
||||
}
|
||||
close(sessionStarted)
|
||||
<-sleepDone
|
||||
assert.NoError(t, session.Close())
|
||||
// Ref: https://github.com/coder/internal/issues/1289
|
||||
// This may return either a nil error or io.EOF.
|
||||
// There is an inherent race here:
|
||||
// 1. Sleep process is killed -> sleepDone is closed.
|
||||
// 2. watchParentContext detects parent death, cancels context,
|
||||
// causing SSH session teardown.
|
||||
// 3. We receive from sleepDone and attempt to call session.Close()
|
||||
// Now either:
|
||||
// a. Session teardown completes before we call Close(), resulting in io.EOF
|
||||
// b. We call Close() first, resulting in a nil error.
|
||||
_ = session.Close()
|
||||
}()
|
||||
|
||||
// Wait for our "parent" process to start
|
||||
|
||||
+4
-1
@@ -367,7 +367,9 @@ func TestStartAutoUpdate(t *testing.T) {
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version1 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
version1 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil, func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.Name = "v1"
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version1.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version1.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
@@ -379,6 +381,7 @@ func TestStartAutoUpdate(t *testing.T) {
|
||||
coderdtest.MustTransitionWorkspace(t, member, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
|
||||
}
|
||||
version2 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, prepareEchoResponses(stringRichParameters), func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.Name = "v2"
|
||||
ctvr.TemplateID = template.ID
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version2.ID)
|
||||
|
||||
-4
@@ -20,10 +20,6 @@ OPTIONS:
|
||||
--copy-parameters-from string, $CODER_WORKSPACE_COPY_PARAMETERS_FROM
|
||||
Specify the source workspace name to copy parameters from.
|
||||
|
||||
--non-interactive bool, $CODER_NON_INTERACTIVE
|
||||
Automatically accept all defaults and error when there is no default
|
||||
for a required input.
|
||||
|
||||
--parameter string-array, $CODER_RICH_PARAMETER
|
||||
Rich parameter value in the format "name=value".
|
||||
|
||||
|
||||
+3
@@ -9,6 +9,9 @@ USAGE:
|
||||
macOS and Windows and a plain text file on Linux. Use the --use-keyring flag
|
||||
or CODER_USE_KEYRING environment variable to change the storage mechanism.
|
||||
|
||||
SUBCOMMANDS:
|
||||
token Print the current session token
|
||||
|
||||
OPTIONS:
|
||||
--first-user-email string, $CODER_FIRST_USER_EMAIL
|
||||
Specifies an email address to use if creating the first user for the
|
||||
|
||||
+11
@@ -0,0 +1,11 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder login token
|
||||
|
||||
Print the current session token
|
||||
|
||||
Print the session token for use in scripts and automation.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
@@ -7,7 +7,7 @@
|
||||
"last_seen_at": "====[timestamp]=====",
|
||||
"name": "test-daemon",
|
||||
"version": "v0.0.0-devel",
|
||||
"api_version": "1.14",
|
||||
"api_version": "1.15",
|
||||
"provisioners": [
|
||||
"echo"
|
||||
],
|
||||
|
||||
+13
-3
@@ -15,9 +15,11 @@ SUBCOMMANDS:
|
||||
|
||||
OPTIONS:
|
||||
--allow-workspace-renames bool, $CODER_ALLOW_WORKSPACE_RENAMES (default: false)
|
||||
DEPRECATED: Allow users to rename their workspaces. Use only for
|
||||
temporary compatibility reasons, this will be removed in a future
|
||||
release.
|
||||
Allow users to rename their workspaces. WARNING: Renaming a workspace
|
||||
can cause Terraform resources that depend on the workspace name to be
|
||||
destroyed and recreated, potentially causing data loss. Only enable
|
||||
this if your templates do not use workspace names in resource
|
||||
identifiers, or if you understand the risks.
|
||||
|
||||
--cache-dir string, $CODER_CACHE_DIRECTORY (default: [cache dir])
|
||||
The directory to cache temporary files. If unspecified and
|
||||
@@ -158,6 +160,14 @@ AI BRIDGE OPTIONS:
|
||||
Maximum number of AI Bridge requests per second per replica. Set to 0
|
||||
to disable (unlimited).
|
||||
|
||||
--aibridge-send-actor-headers bool, $CODER_AIBRIDGE_SEND_ACTOR_HEADERS (default: false)
|
||||
Once enabled, extra headers will be added to upstream requests to
|
||||
identify the user (actor) making requests to AI Bridge. This is only
|
||||
needed if you are using a proxy between AI Bridge and an upstream AI
|
||||
provider. This will send X-Ai-Bridge-Actor-Id (the ID of the user
|
||||
making the request) and X-Ai-Bridge-Actor-Metadata-Username (their
|
||||
username).
|
||||
|
||||
--aibridge-structured-logging bool, $CODER_AIBRIDGE_STRUCTURED_LOGGING (default: false)
|
||||
Emit structured logs for AI Bridge interception records. Use this for
|
||||
exporting these records to external SIEM or observability systems.
|
||||
|
||||
+26
-14
@@ -575,8 +575,10 @@ userQuietHoursSchedule:
|
||||
# change their quiet hours schedule and the site default is always used.
|
||||
# (default: true, type: bool)
|
||||
allowCustomQuietHours: true
|
||||
# DEPRECATED: Allow users to rename their workspaces. Use only for temporary
|
||||
# compatibility reasons, this will be removed in a future release.
|
||||
# Allow users to rename their workspaces. WARNING: Renaming a workspace can cause
|
||||
# Terraform resources that depend on the workspace name to be destroyed and
|
||||
# recreated, potentially causing data loss. Only enable this if your templates do
|
||||
# not use workspace names in resource identifiers, or if you understand the risks.
|
||||
# (default: false, type: bool)
|
||||
allowWorkspaceRenames: false
|
||||
# Configure how emails are sent.
|
||||
@@ -773,32 +775,39 @@ aibridge:
|
||||
# Maximum number of concurrent AI Bridge requests per replica. Set to 0 to disable
|
||||
# (unlimited).
|
||||
# (default: 0, type: int)
|
||||
maxConcurrency: 0
|
||||
max_concurrency: 0
|
||||
# Maximum number of AI Bridge requests per second per replica. Set to 0 to disable
|
||||
# (unlimited).
|
||||
# (default: 0, type: int)
|
||||
rateLimit: 0
|
||||
rate_limit: 0
|
||||
# Emit structured logs for AI Bridge interception records. Use this for exporting
|
||||
# these records to external SIEM or observability systems.
|
||||
# (default: false, type: bool)
|
||||
structuredLogging: false
|
||||
structured_logging: false
|
||||
# Once enabled, extra headers will be added to upstream requests to identify the
|
||||
# user (actor) making requests to AI Bridge. This is only needed if you are using
|
||||
# a proxy between AI Bridge and an upstream AI provider. This will send
|
||||
# X-Ai-Bridge-Actor-Id (the ID of the user making the request) and
|
||||
# X-Ai-Bridge-Actor-Metadata-Username (their username).
|
||||
# (default: false, type: bool)
|
||||
send_actor_headers: false
|
||||
# Enable the circuit breaker to protect against cascading failures from upstream
|
||||
# AI provider rate limits (429, 503, 529 overloaded).
|
||||
# (default: false, type: bool)
|
||||
circuitBreakerEnabled: false
|
||||
circuit_breaker_enabled: false
|
||||
# Number of consecutive failures that triggers the circuit breaker to open.
|
||||
# (default: 5, type: int)
|
||||
circuitBreakerFailureThreshold: 5
|
||||
circuit_breaker_failure_threshold: 5
|
||||
# Cyclic period of the closed state for clearing internal failure counts.
|
||||
# (default: 10s, type: duration)
|
||||
circuitBreakerInterval: 10s
|
||||
circuit_breaker_interval: 10s
|
||||
# How long the circuit breaker stays open before transitioning to half-open state.
|
||||
# (default: 30s, type: duration)
|
||||
circuitBreakerTimeout: 30s
|
||||
circuit_breaker_timeout: 30s
|
||||
# Maximum number of requests allowed in half-open state before deciding to close
|
||||
# or re-open the circuit.
|
||||
# (default: 3, type: int)
|
||||
circuitBreakerMaxRequests: 3
|
||||
circuit_breaker_max_requests: 3
|
||||
aibridgeproxy:
|
||||
# Enable the AI Bridge MITM Proxy for intercepting and decrypting AI provider
|
||||
# requests.
|
||||
@@ -813,13 +822,16 @@ aibridgeproxy:
|
||||
# Path to the CA private key file for AI Bridge Proxy.
|
||||
# (default: <unset>, type: string)
|
||||
key_file: ""
|
||||
# Comma-separated list of domains for which HTTPS traffic will be decrypted and
|
||||
# routed through AI Bridge. Requests to other domains will be tunneled directly
|
||||
# without decryption.
|
||||
# (default: api.anthropic.com,api.openai.com, type: string-array)
|
||||
# Comma-separated list of AI provider domains for which HTTPS traffic will be
|
||||
# decrypted and routed through AI Bridge. Requests to other domains will be
|
||||
# tunneled directly without decryption. Supported domains: api.anthropic.com,
|
||||
# api.openai.com, api.individual.githubcopilot.com.
|
||||
# (default: api.anthropic.com,api.openai.com,api.individual.githubcopilot.com,
|
||||
# type: string-array)
|
||||
domain_allowlist:
|
||||
- api.anthropic.com
|
||||
- api.openai.com
|
||||
- api.individual.githubcopilot.com
|
||||
# URL of an upstream HTTP proxy to chain tunneled (non-allowlisted) requests
|
||||
# through. Format: http://[user:pass@]host:port or https://[user:pass@]host:port.
|
||||
# (default: <unset>, type: string)
|
||||
|
||||
+16
-8
@@ -17,8 +17,10 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor"
|
||||
"github.com/coder/coder/v2/coderd/appearance"
|
||||
"github.com/coder/coder/v2/coderd/boundaryusage"
|
||||
"github.com/coder/coder/v2/coderd/connectionlog"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
@@ -65,10 +67,11 @@ type API struct {
|
||||
var _ agentproto.DRPCAgentServer = &API{}
|
||||
|
||||
type Options struct {
|
||||
AgentID uuid.UUID
|
||||
OwnerID uuid.UUID
|
||||
WorkspaceID uuid.UUID
|
||||
OrganizationID uuid.UUID
|
||||
AgentID uuid.UUID
|
||||
OwnerID uuid.UUID
|
||||
WorkspaceID uuid.UUID
|
||||
OrganizationID uuid.UUID
|
||||
TemplateVersionID uuid.UUID
|
||||
|
||||
AuthenticatedCtx context.Context
|
||||
Log slog.Logger
|
||||
@@ -80,10 +83,12 @@ type Options struct {
|
||||
DerpMapFn func() *tailcfg.DERPMap
|
||||
TailnetCoordinator *atomic.Pointer[tailnet.Coordinator]
|
||||
StatsReporter *workspacestats.Reporter
|
||||
MetadataBatcher *metadatabatcher.Batcher
|
||||
AppearanceFetcher *atomic.Pointer[appearance.Fetcher]
|
||||
PublishWorkspaceUpdateFn func(ctx context.Context, userID uuid.UUID, event wspubsub.WorkspaceEvent)
|
||||
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
|
||||
NetworkTelemetryHandler func(batch []*tailnetproto.TelemetryEvent)
|
||||
BoundaryUsageTracker *boundaryusage.Tracker
|
||||
|
||||
AccessURL *url.URL
|
||||
AppHostname string
|
||||
@@ -178,8 +183,8 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
AgentFn: api.agent,
|
||||
Workspace: api.cachedWorkspaceFields,
|
||||
Database: opts.Database,
|
||||
Pubsub: opts.Pubsub,
|
||||
Log: opts.Log,
|
||||
Batcher: opts.MetadataBatcher,
|
||||
}
|
||||
|
||||
api.LogsAPI = &LogsAPI{
|
||||
@@ -221,9 +226,12 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
}
|
||||
|
||||
api.BoundaryLogsAPI = &BoundaryLogsAPI{
|
||||
Log: opts.Log,
|
||||
WorkspaceID: opts.WorkspaceID,
|
||||
TemplateID: workspace.TemplateID,
|
||||
Log: opts.Log,
|
||||
WorkspaceID: opts.WorkspaceID,
|
||||
OwnerID: opts.OwnerID,
|
||||
TemplateID: workspace.TemplateID,
|
||||
TemplateVersionID: opts.TemplateVersionID,
|
||||
BoundaryUsageTracker: opts.BoundaryUsageTracker,
|
||||
}
|
||||
|
||||
// Start background cache refresh loop to handle workspace changes
|
||||
|
||||
@@ -8,15 +8,21 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/boundaryusage"
|
||||
)
|
||||
|
||||
type BoundaryLogsAPI struct {
|
||||
Log slog.Logger
|
||||
WorkspaceID uuid.UUID
|
||||
TemplateID uuid.UUID
|
||||
Log slog.Logger
|
||||
WorkspaceID uuid.UUID
|
||||
OwnerID uuid.UUID
|
||||
TemplateID uuid.UUID
|
||||
TemplateVersionID uuid.UUID
|
||||
BoundaryUsageTracker *boundaryusage.Tracker
|
||||
}
|
||||
|
||||
func (a *BoundaryLogsAPI) ReportBoundaryLogs(ctx context.Context, req *agentproto.ReportBoundaryLogsRequest) (*agentproto.ReportBoundaryLogsResponse, error) {
|
||||
var allowed, denied int64
|
||||
|
||||
for _, l := range req.Logs {
|
||||
var logTime time.Time
|
||||
if l.Time != nil {
|
||||
@@ -31,10 +37,17 @@ func (a *BoundaryLogsAPI) ReportBoundaryLogs(ctx context.Context, req *agentprot
|
||||
continue
|
||||
}
|
||||
|
||||
if l.Allowed {
|
||||
allowed++
|
||||
} else {
|
||||
denied++
|
||||
}
|
||||
|
||||
fields := []slog.Field{
|
||||
slog.F("decision", allowBoolToString(l.Allowed)),
|
||||
slog.F("workspace_id", a.WorkspaceID.String()),
|
||||
slog.F("template_id", a.TemplateID.String()),
|
||||
slog.F("template_version_id", a.TemplateVersionID.String()),
|
||||
slog.F("http_method", r.HttpRequest.Method),
|
||||
slog.F("http_url", r.HttpRequest.Url),
|
||||
slog.F("event_time", logTime.Format(time.RFC3339Nano)),
|
||||
@@ -50,6 +63,10 @@ func (a *BoundaryLogsAPI) ReportBoundaryLogs(ctx context.Context, req *agentprot
|
||||
}
|
||||
}
|
||||
|
||||
if a.BoundaryUsageTracker != nil && (allowed > 0 || denied > 0) {
|
||||
a.BoundaryUsageTracker.Track(a.WorkspaceID, a.OwnerID, allowed, denied)
|
||||
}
|
||||
|
||||
return &agentproto.ReportBoundaryLogsResponse{}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,27 +2,25 @@ package agentapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
)
|
||||
|
||||
type MetadataAPI struct {
|
||||
AgentFn func(context.Context) (database.WorkspaceAgent, error)
|
||||
Workspace *CachedWorkspaceFields
|
||||
Database database.Store
|
||||
Pubsub pubsub.Pubsub
|
||||
Log slog.Logger
|
||||
Batcher *metadatabatcher.Batcher
|
||||
|
||||
TimeNowFn func() time.Time // defaults to dbtime.Now()
|
||||
}
|
||||
@@ -122,21 +120,10 @@ func (a *MetadataAPI) BatchUpdateMetadata(ctx context.Context, req *agentproto.B
|
||||
)
|
||||
}
|
||||
|
||||
err = a.Database.UpdateWorkspaceAgentMetadata(rbacCtx, dbUpdate)
|
||||
// Use batcher to batch metadata updates.
|
||||
err = a.Batcher.Add(workspaceAgent.ID, dbUpdate.Key, dbUpdate.Value, dbUpdate.Error, dbUpdate.CollectedAt)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("update workspace agent metadata in database: %w", err)
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(WorkspaceAgentMetadataChannelPayload{
|
||||
CollectedAt: collectedAt,
|
||||
Keys: dbUpdate.Key,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("marshal workspace agent metadata channel payload: %w", err)
|
||||
}
|
||||
err = a.Pubsub.Publish(WatchWorkspaceAgentMetadataChannel(workspaceAgent.ID), payload)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("publish workspace agent metadata: %w", err)
|
||||
return nil, xerrors.Errorf("add metadata to batcher: %w", err)
|
||||
}
|
||||
|
||||
// If the metadata keys were too large, we return an error so the agent can
|
||||
@@ -154,12 +141,3 @@ func ellipse(v string, n int) string {
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
type WorkspaceAgentMetadataChannelPayload struct {
|
||||
CollectedAt time.Time `json:"collected_at"`
|
||||
Keys []string `json:"keys"`
|
||||
}
|
||||
|
||||
func WatchWorkspaceAgentMetadataChannel(id uuid.UUID) string {
|
||||
return "workspace_agent_metadata:" + id.String()
|
||||
}
|
||||
|
||||
@@ -2,44 +2,26 @@ package agentapi_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
type fakePublisher struct {
|
||||
// Nil pointer to pass interface check.
|
||||
pubsub.Pubsub
|
||||
publishes [][]byte
|
||||
}
|
||||
|
||||
var _ pubsub.Pubsub = &fakePublisher{}
|
||||
|
||||
func (f *fakePublisher) Publish(_ string, message []byte) error {
|
||||
f.publishes = append(f.publishes, message)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBatchUpdateMetadata(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -50,8 +32,12 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
pub := &fakePublisher{}
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
store := dbmock.NewMockStore(ctrl)
|
||||
ps := pubsub.NewInMemory()
|
||||
reg := prometheus.NewRegistry()
|
||||
|
||||
now := dbtime.Now()
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
@@ -76,24 +62,30 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
batchSize := len(req.Metadata)
|
||||
// This test sends 2 metadata entries. With batch size 2, we expect
|
||||
// exactly 1 capacity flush.
|
||||
store.EXPECT().
|
||||
BatchUpdateWorkspaceAgentMetadata(gomock.Any(), gomock.Any()).
|
||||
Return(nil).
|
||||
Times(1)
|
||||
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
Key: []string{req.Metadata[0].Key, req.Metadata[1].Key},
|
||||
Value: []string{req.Metadata[0].Result.Value, req.Metadata[1].Result.Value},
|
||||
Error: []string{req.Metadata[0].Result.Error, req.Metadata[1].Result.Error},
|
||||
// The value from the agent is ignored.
|
||||
CollectedAt: []time.Time{now, now},
|
||||
}).Return(nil)
|
||||
// Create a real batcher for the test with batch size matching the number
|
||||
// of metadata entries to trigger exactly one capacity flush.
|
||||
batcher, err := metadatabatcher.NewBatcher(ctx, reg, store, ps,
|
||||
metadatabatcher.WithLogger(testutil.Logger(t)),
|
||||
metadatabatcher.WithBatchSize(batchSize),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(batcher.Close)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbM,
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
Batcher: batcher,
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
@@ -103,27 +95,33 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &agentproto.BatchUpdateMetadataResponse{}, resp)
|
||||
|
||||
require.Equal(t, 1, len(pub.publishes))
|
||||
var gotEvent agentapi.WorkspaceAgentMetadataChannelPayload
|
||||
require.NoError(t, json.Unmarshal(pub.publishes[0], &gotEvent))
|
||||
require.Equal(t, agentapi.WorkspaceAgentMetadataChannelPayload{
|
||||
CollectedAt: now,
|
||||
Keys: []string{req.Metadata[0].Key, req.Metadata[1].Key},
|
||||
}, gotEvent)
|
||||
// Wait for the capacity flush to complete before test ends.
|
||||
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
|
||||
return prom_testutil.ToFloat64(batcher.Metrics.MetadataTotal) == 2.0
|
||||
}, testutil.IntervalFast)
|
||||
})
|
||||
|
||||
t.Run("ExceededLength", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
pub := pubsub.NewInMemory()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctrl := gomock.NewController(t)
|
||||
store := dbmock.NewMockStore(ctrl)
|
||||
ps := pubsub.NewInMemory()
|
||||
reg := prometheus.NewRegistry()
|
||||
|
||||
// This test sends 4 metadata entries with some exceeding length limits. We set the batchers batch size so that
|
||||
// we can reliably ensure a batch is sent within the WaitShort time period.
|
||||
store.EXPECT().
|
||||
BatchUpdateWorkspaceAgentMetadata(gomock.Any(), gomock.Any()).
|
||||
Return(nil).
|
||||
Times(1)
|
||||
|
||||
now := dbtime.Now()
|
||||
almostLongValue := ""
|
||||
for i := 0; i < 2048; i++ {
|
||||
almostLongValue += "a"
|
||||
}
|
||||
|
||||
now := dbtime.Now()
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
Metadata: []*agentproto.Metadata{
|
||||
{
|
||||
@@ -152,34 +150,21 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
Key: []string{req.Metadata[0].Key, req.Metadata[1].Key, req.Metadata[2].Key, req.Metadata[3].Key},
|
||||
Value: []string{
|
||||
almostLongValue,
|
||||
almostLongValue, // truncated
|
||||
"",
|
||||
"",
|
||||
},
|
||||
Error: []string{
|
||||
"",
|
||||
"value of 2049 bytes exceeded 2048 bytes",
|
||||
almostLongValue,
|
||||
"error of 2049 bytes exceeded 2048 bytes", // replaced
|
||||
},
|
||||
// The value from the agent is ignored.
|
||||
CollectedAt: []time.Time{now, now, now, now},
|
||||
}).Return(nil)
|
||||
batchSize := len(req.Metadata)
|
||||
batcher, err := metadatabatcher.NewBatcher(ctx, reg, store, ps,
|
||||
metadatabatcher.WithLogger(testutil.Logger(t)),
|
||||
metadatabatcher.WithBatchSize(batchSize),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(batcher.Close)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbM,
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
Batcher: batcher,
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
@@ -188,13 +173,21 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
resp, err := api.BatchUpdateMetadata(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &agentproto.BatchUpdateMetadataResponse{}, resp)
|
||||
// Wait for the capacity flush to complete before test ends.
|
||||
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
|
||||
return prom_testutil.ToFloat64(batcher.Metrics.MetadataTotal) == 4.0
|
||||
}, testutil.IntervalFast)
|
||||
})
|
||||
|
||||
t.Run("KeysTooLong", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
pub := pubsub.NewInMemory()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
store := dbmock.NewMockStore(ctrl)
|
||||
ps := pubsub.NewInMemory()
|
||||
reg := prometheus.NewRegistry()
|
||||
|
||||
now := dbtime.Now()
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
@@ -231,595 +224,40 @@ func TestBatchUpdateMetadata(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
batchSize := len(req.Metadata)
|
||||
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
// No key 4.
|
||||
Key: []string{req.Metadata[0].Key, req.Metadata[1].Key, req.Metadata[2].Key},
|
||||
Value: []string{req.Metadata[0].Result.Value, req.Metadata[1].Result.Value, req.Metadata[2].Result.Value},
|
||||
Error: []string{req.Metadata[0].Result.Error, req.Metadata[1].Result.Error, req.Metadata[2].Result.Error},
|
||||
// The value from the agent is ignored.
|
||||
CollectedAt: []time.Time{now, now, now},
|
||||
}).Return(nil)
|
||||
// This test sends 4 metadata entries but rejects the last one due to excessive key length.
|
||||
// We set the batchers batch size so that we can reliably ensure a batch is sent within the WaitShort time period.
|
||||
store.EXPECT().
|
||||
BatchUpdateWorkspaceAgentMetadata(gomock.Any(), gomock.Any()).
|
||||
Return(nil).
|
||||
Times(1)
|
||||
|
||||
batcher, err := metadatabatcher.NewBatcher(ctx, reg, store, ps,
|
||||
metadatabatcher.WithLogger(testutil.Logger(t)),
|
||||
metadatabatcher.WithBatchSize(batchSize-1), // one of the keys will be rejected
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(batcher.Close)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbM,
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
Batcher: batcher,
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
}
|
||||
|
||||
// Watch the pubsub for events.
|
||||
var (
|
||||
eventCount int64
|
||||
gotEvent agentapi.WorkspaceAgentMetadataChannelPayload
|
||||
)
|
||||
cancel, err := pub.Subscribe(agentapi.WatchWorkspaceAgentMetadataChannel(agent.ID), func(ctx context.Context, message []byte) {
|
||||
if atomic.AddInt64(&eventCount, 1) > 1 {
|
||||
return
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(message, &gotEvent))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
defer cancel()
|
||||
|
||||
resp, err := api.BatchUpdateMetadata(context.Background(), req)
|
||||
// Should return error because keys are too long.
|
||||
require.Error(t, err)
|
||||
require.Equal(t, "metadata keys of 6145 bytes exceeded 6144 bytes", err.Error())
|
||||
require.Nil(t, resp)
|
||||
|
||||
require.Equal(t, int64(1), atomic.LoadInt64(&eventCount))
|
||||
require.Equal(t, agentapi.WorkspaceAgentMetadataChannelPayload{
|
||||
CollectedAt: now,
|
||||
// No key 4.
|
||||
Keys: []string{req.Metadata[0].Key, req.Metadata[1].Key, req.Metadata[2].Key},
|
||||
}, gotEvent)
|
||||
})
|
||||
|
||||
// Test RBAC fast path with valid RBAC object - should NOT call GetWorkspaceByAgentID
|
||||
// This test verifies that when a valid RBAC object is present in context, the dbauthz layer
|
||||
// uses the fast path and skips the GetWorkspaceByAgentID database call.
|
||||
t.Run("WorkspaceCached_SkipsDBCall", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctrl = gomock.NewController(t)
|
||||
dbM = dbmock.NewMockStore(ctrl)
|
||||
pub = &fakePublisher{}
|
||||
now = dbtime.Now()
|
||||
// Set up consistent IDs that represent a valid workspace->agent relationship
|
||||
workspaceID = uuid.MustParse("12345678-1234-1234-1234-123456789012")
|
||||
templateID = uuid.MustParse("aaaabbbb-cccc-dddd-eeee-ffffffff0000")
|
||||
ownerID = uuid.MustParse("87654321-4321-4321-4321-210987654321")
|
||||
orgID = uuid.MustParse("11111111-1111-1111-1111-111111111111")
|
||||
agentID = uuid.MustParse("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")
|
||||
)
|
||||
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: agentID,
|
||||
// In a real scenario, this agent would belong to a resource in the workspace above
|
||||
}
|
||||
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
Metadata: []*agentproto.Metadata{
|
||||
{
|
||||
Key: "test_key",
|
||||
Result: &agentproto.WorkspaceAgentMetadata_Result{
|
||||
CollectedAt: timestamppb.New(now.Add(-time.Second)),
|
||||
Age: 1,
|
||||
Value: "test_value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Expect UpdateWorkspaceAgentMetadata to be called
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
Key: []string{"test_key"},
|
||||
Value: []string{"test_value"},
|
||||
Error: []string{""},
|
||||
CollectedAt: []time.Time{now},
|
||||
}).Return(nil)
|
||||
|
||||
// DO NOT expect GetWorkspaceByAgentID - the fast path should skip this call
|
||||
// If GetWorkspaceByAgentID is called, the test will fail with "unexpected call"
|
||||
|
||||
// dbauthz will call Wrappers() to check for wrapped databases
|
||||
dbM.EXPECT().Wrappers().Return([]string{}).AnyTimes()
|
||||
|
||||
// Set up dbauthz to test the actual authorization layer
|
||||
auth := rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
|
||||
accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
|
||||
var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
|
||||
accessControlStore.Store(&acs)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(_ context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbauthz.New(dbM, auth, testutil.Logger(t), accessControlStore),
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
}
|
||||
|
||||
api.Workspace.UpdateValues(database.Workspace{
|
||||
ID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
})
|
||||
|
||||
// Create roles with workspace permissions
|
||||
userRoles := rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleMember(),
|
||||
User: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {
|
||||
Member: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
agentScope := rbac.WorkspaceAgentScope(rbac.WorkspaceAgentScopeParams{
|
||||
WorkspaceID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: templateID,
|
||||
VersionID: uuid.New(),
|
||||
})
|
||||
|
||||
ctx := dbauthz.As(context.Background(), rbac.Subject{
|
||||
Type: rbac.SubjectTypeUser,
|
||||
FriendlyName: "testuser",
|
||||
Email: "testuser@example.com",
|
||||
ID: ownerID.String(),
|
||||
Roles: userRoles,
|
||||
Groups: []string{orgID.String()},
|
||||
Scope: agentScope,
|
||||
}.WithCachedASTValue())
|
||||
|
||||
resp, err := api.BatchUpdateMetadata(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
})
|
||||
// Test RBAC slow path - invalid RBAC object should fall back to GetWorkspaceByAgentID
|
||||
// This test verifies that when the RBAC object has invalid IDs (nil UUIDs), the dbauthz layer
|
||||
// falls back to the slow path and calls GetWorkspaceByAgentID.
|
||||
t.Run("InvalidWorkspaceCached_RequiresDBCall", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctrl = gomock.NewController(t)
|
||||
dbM = dbmock.NewMockStore(ctrl)
|
||||
pub = &fakePublisher{}
|
||||
now = dbtime.Now()
|
||||
workspaceID = uuid.MustParse("12345678-1234-1234-1234-123456789012")
|
||||
templateID = uuid.MustParse("aaaabbbb-cccc-dddd-eeee-ffffffff0000")
|
||||
ownerID = uuid.MustParse("87654321-4321-4321-4321-210987654321")
|
||||
orgID = uuid.MustParse("11111111-1111-1111-1111-111111111111")
|
||||
agentID = uuid.MustParse("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")
|
||||
)
|
||||
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: agentID,
|
||||
}
|
||||
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
Metadata: []*agentproto.Metadata{
|
||||
{
|
||||
Key: "test_key",
|
||||
Result: &agentproto.WorkspaceAgentMetadata_Result{
|
||||
CollectedAt: timestamppb.New(now.Add(-time.Second)),
|
||||
Age: 1,
|
||||
Value: "test_value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// EXPECT GetWorkspaceByAgentID to be called because the RBAC fast path validation fails
|
||||
dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agentID).Return(database.Workspace{
|
||||
ID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
}, nil)
|
||||
|
||||
// Expect UpdateWorkspaceAgentMetadata to be called after authorization
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
Key: []string{"test_key"},
|
||||
Value: []string{"test_value"},
|
||||
Error: []string{""},
|
||||
CollectedAt: []time.Time{now},
|
||||
}).Return(nil)
|
||||
|
||||
// dbauthz will call Wrappers() to check for wrapped databases
|
||||
dbM.EXPECT().Wrappers().Return([]string{}).AnyTimes()
|
||||
|
||||
// Set up dbauthz to test the actual authorization layer
|
||||
auth := rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
|
||||
accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
|
||||
var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
|
||||
accessControlStore.Store(&acs)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(_ context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbauthz.New(dbM, auth, testutil.Logger(t), accessControlStore),
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
}
|
||||
|
||||
// Create an invalid RBAC object with nil UUIDs for owner/org
|
||||
// This will fail dbauthz fast path validation and trigger GetWorkspaceByAgentID
|
||||
api.Workspace.UpdateValues(database.Workspace{
|
||||
ID: uuid.MustParse("cccccccc-cccc-cccc-cccc-cccccccccccc"),
|
||||
OwnerID: uuid.Nil, // Invalid: fails dbauthz fast path validation
|
||||
OrganizationID: uuid.Nil, // Invalid: fails dbauthz fast path validation
|
||||
})
|
||||
|
||||
// Create roles with workspace permissions
|
||||
userRoles := rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleMember(),
|
||||
User: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {
|
||||
Member: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
agentScope := rbac.WorkspaceAgentScope(rbac.WorkspaceAgentScopeParams{
|
||||
WorkspaceID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: templateID,
|
||||
VersionID: uuid.New(),
|
||||
})
|
||||
|
||||
ctx := dbauthz.As(context.Background(), rbac.Subject{
|
||||
Type: rbac.SubjectTypeUser,
|
||||
FriendlyName: "testuser",
|
||||
Email: "testuser@example.com",
|
||||
ID: ownerID.String(),
|
||||
Roles: userRoles,
|
||||
Groups: []string{orgID.String()},
|
||||
Scope: agentScope,
|
||||
}.WithCachedASTValue())
|
||||
|
||||
resp, err := api.BatchUpdateMetadata(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
})
|
||||
|
||||
// Test RBAC slow path - no RBAC object in context
|
||||
// This test verifies that when no RBAC object is present in context, the dbauthz layer
|
||||
// falls back to the slow path and calls GetWorkspaceByAgentID.
|
||||
t.Run("WorkspaceNotCached_RequiresDBCall", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctrl = gomock.NewController(t)
|
||||
dbM = dbmock.NewMockStore(ctrl)
|
||||
pub = &fakePublisher{}
|
||||
now = dbtime.Now()
|
||||
workspaceID = uuid.MustParse("12345678-1234-1234-1234-123456789012")
|
||||
templateID = uuid.MustParse("aaaabbbb-cccc-dddd-eeee-ffffffff0000")
|
||||
ownerID = uuid.MustParse("87654321-4321-4321-4321-210987654321")
|
||||
orgID = uuid.MustParse("11111111-1111-1111-1111-111111111111")
|
||||
agentID = uuid.MustParse("dddddddd-dddd-dddd-dddd-dddddddddddd")
|
||||
)
|
||||
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: agentID,
|
||||
}
|
||||
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
Metadata: []*agentproto.Metadata{
|
||||
{
|
||||
Key: "test_key",
|
||||
Result: &agentproto.WorkspaceAgentMetadata_Result{
|
||||
CollectedAt: timestamppb.New(now.Add(-time.Second)),
|
||||
Age: 1,
|
||||
Value: "test_value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// EXPECT GetWorkspaceByAgentID to be called because no RBAC object is in context
|
||||
dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agentID).Return(database.Workspace{
|
||||
ID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
}, nil)
|
||||
|
||||
// Expect UpdateWorkspaceAgentMetadata to be called after authorization
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), database.UpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
Key: []string{"test_key"},
|
||||
Value: []string{"test_value"},
|
||||
Error: []string{""},
|
||||
CollectedAt: []time.Time{now},
|
||||
}).Return(nil)
|
||||
|
||||
// dbauthz will call Wrappers() to check for wrapped databases
|
||||
dbM.EXPECT().Wrappers().Return([]string{}).AnyTimes()
|
||||
|
||||
// Set up dbauthz to test the actual authorization layer
|
||||
auth := rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
|
||||
accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
|
||||
var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
|
||||
accessControlStore.Store(&acs)
|
||||
|
||||
api := &agentapi.MetadataAPI{
|
||||
AgentFn: func(_ context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
Database: dbauthz.New(dbM, auth, testutil.Logger(t), accessControlStore),
|
||||
Pubsub: pub,
|
||||
Log: testutil.Logger(t),
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
},
|
||||
}
|
||||
|
||||
// Create roles with workspace permissions
|
||||
userRoles := rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleMember(),
|
||||
User: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {
|
||||
Member: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
agentScope := rbac.WorkspaceAgentScope(rbac.WorkspaceAgentScopeParams{
|
||||
WorkspaceID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: templateID,
|
||||
VersionID: uuid.New(),
|
||||
})
|
||||
|
||||
ctx := dbauthz.As(context.Background(), rbac.Subject{
|
||||
Type: rbac.SubjectTypeUser,
|
||||
FriendlyName: "testuser",
|
||||
Email: "testuser@example.com",
|
||||
ID: ownerID.String(),
|
||||
Roles: userRoles,
|
||||
Groups: []string{orgID.String()},
|
||||
Scope: agentScope,
|
||||
}.WithCachedASTValue())
|
||||
|
||||
resp, err := api.BatchUpdateMetadata(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
})
|
||||
|
||||
// Test cache refresh - AutostartSchedule updated
|
||||
// This test verifies that the cache refresh mechanism actually calls GetWorkspaceByID
|
||||
// and updates the cached workspace fields when the workspace is modified (e.g., autostart schedule changes).
|
||||
t.Run("CacheRefreshed_AutostartScheduleUpdated", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
ctrl = gomock.NewController(t)
|
||||
dbM = dbmock.NewMockStore(ctrl)
|
||||
pub = &fakePublisher{}
|
||||
now = dbtime.Now()
|
||||
mClock = quartz.NewMock(t)
|
||||
tickerTrap = mClock.Trap().TickerFunc("cache_refresh")
|
||||
|
||||
workspaceID = uuid.MustParse("12345678-1234-1234-1234-123456789012")
|
||||
ownerID = uuid.MustParse("87654321-4321-4321-4321-210987654321")
|
||||
orgID = uuid.MustParse("11111111-1111-1111-1111-111111111111")
|
||||
templateID = uuid.MustParse("aaaabbbb-cccc-dddd-eeee-ffffffff0000")
|
||||
agentID = uuid.MustParse("ffffffff-ffff-ffff-ffff-ffffffffffff")
|
||||
)
|
||||
|
||||
agent := database.WorkspaceAgent{
|
||||
ID: agentID,
|
||||
}
|
||||
|
||||
// Initial workspace - has Monday-Friday 9am autostart
|
||||
initialWorkspace := database.Workspace{
|
||||
ID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
TemplateID: templateID,
|
||||
Name: "my-workspace",
|
||||
OwnerUsername: "testuser",
|
||||
TemplateName: "test-template",
|
||||
AutostartSchedule: sql.NullString{Valid: true, String: "CRON_TZ=UTC 0 9 * * 1-5"},
|
||||
}
|
||||
|
||||
// Updated workspace - user changed autostart to 5pm and renamed workspace
|
||||
updatedWorkspace := database.Workspace{
|
||||
ID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
TemplateID: templateID,
|
||||
Name: "my-workspace-renamed", // Changed!
|
||||
OwnerUsername: "testuser",
|
||||
TemplateName: "test-template",
|
||||
AutostartSchedule: sql.NullString{Valid: true, String: "CRON_TZ=UTC 0 17 * * 1-5"}, // Changed!
|
||||
DormantAt: sql.NullTime{},
|
||||
}
|
||||
|
||||
req := &agentproto.BatchUpdateMetadataRequest{
|
||||
Metadata: []*agentproto.Metadata{
|
||||
{
|
||||
Key: "test_key",
|
||||
Result: &agentproto.WorkspaceAgentMetadata_Result{
|
||||
CollectedAt: timestamppb.New(now.Add(-time.Second)),
|
||||
Age: 1,
|
||||
Value: "test_value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// EXPECT GetWorkspaceByID to be called during cache refresh
|
||||
// This is the key assertion - proves the refresh mechanism is working
|
||||
dbM.EXPECT().GetWorkspaceByID(gomock.Any(), workspaceID).Return(updatedWorkspace, nil)
|
||||
|
||||
// API needs to fetch the agent when calling metadata update
|
||||
dbM.EXPECT().GetWorkspaceAgentByID(gomock.Any(), agentID).Return(agent, nil)
|
||||
|
||||
// After refresh, metadata update should work with updated cache
|
||||
dbM.EXPECT().UpdateWorkspaceAgentMetadata(gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(ctx context.Context, params database.UpdateWorkspaceAgentMetadataParams) error {
|
||||
require.Equal(t, agent.ID, params.WorkspaceAgentID)
|
||||
require.Equal(t, []string{"test_key"}, params.Key)
|
||||
require.Equal(t, []string{"test_value"}, params.Value)
|
||||
require.Equal(t, []string{""}, params.Error)
|
||||
require.Len(t, params.CollectedAt, 1)
|
||||
return nil
|
||||
},
|
||||
).AnyTimes()
|
||||
|
||||
// May call GetWorkspaceByAgentID if slow path is used before refresh
|
||||
dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agentID).Return(updatedWorkspace, nil).AnyTimes()
|
||||
|
||||
// dbauthz will call Wrappers()
|
||||
dbM.EXPECT().Wrappers().Return([]string{}).AnyTimes()
|
||||
|
||||
// Set up dbauthz
|
||||
auth := rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
|
||||
accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
|
||||
var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
|
||||
accessControlStore.Store(&acs)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Create roles with workspace permissions
|
||||
userRoles := rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleMember(),
|
||||
User: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {
|
||||
Member: []rbac.Permission{
|
||||
{
|
||||
Negate: false,
|
||||
ResourceType: rbac.ResourceWorkspace.Type,
|
||||
Action: policy.WildcardSymbol,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
agentScope := rbac.WorkspaceAgentScope(rbac.WorkspaceAgentScopeParams{
|
||||
WorkspaceID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
TemplateID: templateID,
|
||||
VersionID: uuid.New(),
|
||||
})
|
||||
|
||||
ctxWithActor := dbauthz.As(ctx, rbac.Subject{
|
||||
Type: rbac.SubjectTypeUser,
|
||||
FriendlyName: "testuser",
|
||||
Email: "testuser@example.com",
|
||||
ID: ownerID.String(),
|
||||
Roles: userRoles,
|
||||
Groups: []string{orgID.String()},
|
||||
Scope: agentScope,
|
||||
}.WithCachedASTValue())
|
||||
|
||||
// Create full API with cached workspace fields (initial state)
|
||||
api := agentapi.New(agentapi.Options{
|
||||
AuthenticatedCtx: ctxWithActor,
|
||||
AgentID: agentID,
|
||||
WorkspaceID: workspaceID,
|
||||
OwnerID: ownerID,
|
||||
OrganizationID: orgID,
|
||||
Database: dbauthz.New(dbM, auth, testutil.Logger(t), accessControlStore),
|
||||
Log: testutil.Logger(t),
|
||||
Clock: mClock,
|
||||
Pubsub: pub,
|
||||
}, initialWorkspace) // Cache is initialized with 9am schedule and "my-workspace" name
|
||||
|
||||
// Wait for ticker to be set up and release it so it can fire
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
// Advance clock to trigger cache refresh and wait for it to complete
|
||||
_, aw := mClock.AdvanceNext()
|
||||
aw.MustWait(ctx)
|
||||
|
||||
// At this point, GetWorkspaceByID should have been called and cache updated
|
||||
// The cache now has the 5pm schedule and "my-workspace-renamed" name
|
||||
|
||||
// Now call metadata update to verify the refreshed cache works
|
||||
resp, err := api.MetadataAPI.BatchUpdateMetadata(ctxWithActor, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
|
||||
return prom_testutil.ToFloat64(batcher.Metrics.MetadataTotal) == 3.0
|
||||
}, testutil.IntervalFast)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
package metadatabatcher
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const (
	// UUIDBase64Size is the length of a base64-encoded UUID without padding
	// (22 characters): base64.RawStdEncoding.EncodedLen(16) == 22.
	UUIDBase64Size = 22

	// maxAgentIDsPerChunk is the maximum number of agent IDs that can fit in a
	// single pubsub message. PostgreSQL NOTIFY has an 8KB limit.
	// With padding-free base64 encoding, each UUID is 22 characters, so we can
	// fit ~363 agent IDs per chunk (8000 / 22 = 363.6).
	maxAgentIDsPerChunk = maxPubsubPayloadSize / UUIDBase64Size
)
|
||||
|
||||
func EncodeAgentID(agentID uuid.UUID, dst []byte) error {
|
||||
// Encode UUID bytes to base64 without padding (RawStdEncoding).
|
||||
// This produces exactly 22 characters per UUID.
|
||||
reqLen := base64.RawStdEncoding.EncodedLen(len(agentID))
|
||||
if len(dst) < reqLen {
|
||||
return xerrors.Errorf("destination byte slice was too small %d, required %d", len(dst), reqLen)
|
||||
}
|
||||
base64.RawStdEncoding.Encode(dst, agentID[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeAgentIDChunks encodes agent IDs into chunks that fit within the
|
||||
// PostgreSQL NOTIFY 8KB payload size limit. Each UUID is base64-encoded
|
||||
// (without padding) and concatenated into a single byte slice per chunk.
|
||||
func EncodeAgentIDChunks(agentIDs []uuid.UUID) ([][]byte, error) {
|
||||
chunks := make([][]byte, 0, (len(agentIDs)+maxAgentIDsPerChunk-1)/maxAgentIDsPerChunk)
|
||||
|
||||
for i := 0; i < len(agentIDs); i += maxAgentIDsPerChunk {
|
||||
end := i + maxAgentIDsPerChunk
|
||||
if end > len(agentIDs) {
|
||||
end = len(agentIDs)
|
||||
}
|
||||
|
||||
chunk := agentIDs[i:end]
|
||||
|
||||
// Build payload by base64-encoding each UUID (without padding) and
|
||||
// concatenating them. This is UTF-8 safe for PostgreSQL NOTIFY.
|
||||
payload := make([]byte, len(chunk)*UUIDBase64Size)
|
||||
for i, agentID := range chunk {
|
||||
err := EncodeAgentID(agentID, payload[i*UUIDBase64Size:(i+1)*UUIDBase64Size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
chunks = append(chunks, payload)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
@@ -0,0 +1,122 @@
|
||||
package metadatabatcher_test
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
)
|
||||
|
||||
func TestEncodeDecodeRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
agentIDs []uuid.UUID
|
||||
}{
|
||||
{
|
||||
name: "Empty",
|
||||
agentIDs: []uuid.UUID{},
|
||||
},
|
||||
{
|
||||
name: "Single",
|
||||
agentIDs: []uuid.UUID{uuid.New()},
|
||||
},
|
||||
{
|
||||
name: "Multiple",
|
||||
agentIDs: []uuid.UUID{
|
||||
uuid.New(),
|
||||
uuid.New(),
|
||||
uuid.New(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Exactly 363 (one chunk)",
|
||||
agentIDs: func() []uuid.UUID {
|
||||
ids := make([]uuid.UUID, 363)
|
||||
for i := range ids {
|
||||
ids[i] = uuid.New()
|
||||
}
|
||||
return ids
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "364 (two chunks)",
|
||||
agentIDs: func() []uuid.UUID {
|
||||
ids := make([]uuid.UUID, 364)
|
||||
for i := range ids {
|
||||
ids[i] = uuid.New()
|
||||
}
|
||||
return ids
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "600 (multiple chunks)",
|
||||
agentIDs: func() []uuid.UUID {
|
||||
ids := make([]uuid.UUID, 600)
|
||||
for i := range ids {
|
||||
ids[i] = uuid.New()
|
||||
}
|
||||
return ids
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Encode the agent IDs into chunks.
|
||||
chunks, err := metadatabatcher.EncodeAgentIDChunks(tt.agentIDs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Decode all chunks and collect the agent IDs.
|
||||
var decoded []uuid.UUID
|
||||
for _, chunk := range chunks {
|
||||
for i := 0; i < len(chunk); i += metadatabatcher.UUIDBase64Size {
|
||||
var u uuid.UUID
|
||||
_, err := base64.RawStdEncoding.Decode(u[:], chunk[i:i+metadatabatcher.UUIDBase64Size])
|
||||
require.NoError(t, err)
|
||||
decoded = append(decoded, u)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify we got the same agent IDs back.
|
||||
if len(tt.agentIDs) == 0 {
|
||||
require.Empty(t, decoded)
|
||||
} else {
|
||||
require.Equal(t, tt.agentIDs, decoded)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestEncodeAgentIDChunks_PGPubsubSize ensures that each pubsub message generated via EncodeAgentIDChunks fits within
|
||||
// the max allowed 8kb by Postgres.
|
||||
func TestEncodeAgentIDChunks_PGPubsubSize(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create 600 agents (should split into 2 chunks: 363 + 237).
|
||||
agentIDs := make([]uuid.UUID, 600)
|
||||
for i := range agentIDs {
|
||||
agentIDs[i] = uuid.New()
|
||||
}
|
||||
|
||||
chunks, err := metadatabatcher.EncodeAgentIDChunks(agentIDs)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, chunks, 2)
|
||||
|
||||
// First chunk should have 363 IDs (363 * 22 = 7986 bytes).
|
||||
require.Equal(t, 363*22, len(chunks[0]))
|
||||
|
||||
// Second chunk should have 237 IDs (237 * 22 = 5214 bytes).
|
||||
require.Equal(t, 237*22, len(chunks[1]))
|
||||
|
||||
// Each chunk should be under 8KB.
|
||||
for i, chunk := range chunks {
|
||||
require.LessOrEqual(t, len(chunk), 8000, "chunk %d exceeds 8KB limit", i)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,398 @@
|
||||
package metadatabatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
const (
	// defaultMetadataBatchSize is the maximum number of metadata entries
	// (key-value pairs across all agents) to batch before forcing a flush.
	// With typical agents having 5-15 metadata keys, this accommodates
	// 30-100 agents per batch.
	defaultMetadataBatchSize = 500

	// defaultChannelBufferMultiplier is the multiplier for the channel buffer size
	// relative to the batch size. A 5x multiplier provides significant headroom
	// for bursts while the batch is being flushed.
	defaultChannelBufferMultiplier = 5

	// defaultMetadataFlushInterval is how frequently to flush batched metadata
	// updates to the database and pubsub. 5 seconds provides a good balance
	// between reducing database load and maintaining reasonable UI update
	// latency.
	defaultMetadataFlushInterval = 5 * time.Second

	// maxPubsubPayloadSize is the maximum size of a single pubsub message.
	// PostgreSQL NOTIFY has an 8KB limit for the payload.
	maxPubsubPayloadSize = 8000 // Leave some headroom below 8192 bytes.

	// finalFlushTimeout is the timeout for the context created when flushing
	// the final batch after the top-level context is 'Done'.
	finalFlushTimeout = 15 * time.Second

	// MetadataBatchPubsubChannel is the channel batch metadata updates are
	// published to. Each update contains a list of all agent IDs that have an
	// update in the most recent batch.
	MetadataBatchPubsubChannel = "workspace_agent_metadata_batch"

	// Flush reasons, recorded in metrics and logs.
	flushCapacity = "capacity"
	flushTicker   = "scheduled"
	flushExit     = "shutdown"
)
|
||||
|
||||
// compositeKey uniquely identifies a metadata entry by agent ID and key name.
type compositeKey struct {
	agentID uuid.UUID
	key     string
}

// value holds a single metadata key-value pair with its error state
// and collection timestamp.
type value struct {
	// v is the collected metadata value.
	v string
	// error is the error (if any) reported while collecting this value.
	error string
	// collectedAt is when the value was collected; used for deduplication
	// (newest wins).
	collectedAt time.Time
}

// update represents a single metadata update to be batched.
type update struct {
	compositeKey
	value
}
|
||||
|
||||
// Batcher holds a buffer of agent metadata updates and periodically
// flushes them to the database and pubsub. This reduces database write
// frequency and pubsub publish rate.
type Batcher struct {
	store database.Store
	ps    pubsub.Pubsub
	log   slog.Logger

	// updateCh is the buffered channel that receives metadata updates from Add() calls.
	updateCh chan update

	// batch holds the current batch being accumulated. For updates with the
	// same composite key the most recent value wins.
	batch map[compositeKey]value
	// currentBatchLen tracks the number of distinct keys currently in batch.
	currentBatchLen atomic.Int64
	// maxBatchSize is the entry count that triggers a capacity flush.
	maxBatchSize int

	clock quartz.Clock
	// timer drives scheduled flushes; it is reset after every flush.
	timer    *quartz.Timer
	interval time.Duration
	// warnTicker is used to only log at warn level for dropped keys
	// infrequently, as it could be noisy in failure scenarios.
	warnTicker *quartz.Ticker

	// ctx is the context for the batcher. Used to check if shutdown has begun.
	ctx    context.Context
	cancel context.CancelFunc
	// done is closed once the run goroutine has exited.
	done chan struct{}

	// Metrics collects Prometheus metrics for the batcher.
	Metrics Metrics
}
|
||||
|
||||
// Option is a functional option for configuring a Batcher.
type Option func(b *Batcher)

// WithBatchSize overrides the maximum number of metadata entries held
// before a capacity flush is forced.
func WithBatchSize(size int) Option {
	return func(b *Batcher) {
		b.maxBatchSize = size
	}
}

// WithInterval overrides the interval between scheduled flushes.
func WithInterval(d time.Duration) Option {
	return func(b *Batcher) {
		b.interval = d
	}
}

// WithLogger sets the logger used by the Batcher.
func WithLogger(log slog.Logger) Option {
	return func(b *Batcher) {
		b.log = log
	}
}

// WithClock sets the clock used for timers and tickers (useful in tests).
func WithClock(clock quartz.Clock) Option {
	return func(b *Batcher) {
		b.clock = clock
	}
}
|
||||
|
||||
// NewBatcher creates a new Batcher and starts it. Here ctx controls the lifetime of the batcher, canceling it will
|
||||
// result in the Batcher exiting it's processing routine (run).
|
||||
func NewBatcher(ctx context.Context, reg prometheus.Registerer, store database.Store, ps pubsub.Pubsub, opts ...Option) (*Batcher, error) {
|
||||
b := &Batcher{
|
||||
store: store,
|
||||
ps: ps,
|
||||
Metrics: NewMetrics(),
|
||||
done: make(chan struct{}),
|
||||
log: slog.Logger{},
|
||||
clock: quartz.NewReal(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(b)
|
||||
}
|
||||
|
||||
b.Metrics.register(reg)
|
||||
|
||||
if b.interval == 0 {
|
||||
b.interval = defaultMetadataFlushInterval
|
||||
}
|
||||
|
||||
if b.maxBatchSize == 0 {
|
||||
b.maxBatchSize = defaultMetadataBatchSize
|
||||
}
|
||||
|
||||
// Create warn ticker after options are applied so it uses the correct clock.
|
||||
b.warnTicker = b.clock.NewTicker(10 * time.Second)
|
||||
|
||||
if b.timer == nil {
|
||||
b.timer = b.clock.NewTimer(b.interval)
|
||||
}
|
||||
|
||||
// Create buffered channel with 5x batch size capacity
|
||||
channelSize := b.maxBatchSize * defaultChannelBufferMultiplier
|
||||
b.updateCh = make(chan update, channelSize)
|
||||
|
||||
// Initialize batch map
|
||||
b.batch = make(map[compositeKey]value)
|
||||
|
||||
b.ctx, b.cancel = context.WithCancel(ctx)
|
||||
go func() {
|
||||
b.run(b.ctx)
|
||||
close(b.done)
|
||||
}()
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (b *Batcher) Close() {
|
||||
b.cancel()
|
||||
if b.timer != nil {
|
||||
b.timer.Stop()
|
||||
}
|
||||
// Wait for the run function to end, it may be sending one last batch.
|
||||
<-b.done
|
||||
}
|
||||
|
||||
// Add adds metadata updates for an agent to the batcher by writing to a
|
||||
// buffered channel. If the channel is full, updates are dropped. Updates
|
||||
// to the same metadata key for the same agent are deduplicated in the batch,
|
||||
// keeping only the value with the most recent collectedAt timestamp.
|
||||
func (b *Batcher) Add(agentID uuid.UUID, keys []string, values []string, errors []string, collectedAt []time.Time) error {
|
||||
if !(len(keys) == len(values) && len(values) == len(errors) && len(errors) == len(collectedAt)) {
|
||||
return xerrors.Errorf("invalid Add call, all inputs must have the same number of items; keys: %d, values: %d, errors: %d, collectedAt: %d", len(keys), len(values), len(errors), len(collectedAt))
|
||||
}
|
||||
|
||||
// Write each update to the channel. If the channel is full, drop the update.
|
||||
var u update
|
||||
droppedCount := 0
|
||||
for i := range keys {
|
||||
u.agentID = agentID
|
||||
u.key = keys[i]
|
||||
u.v = values[i]
|
||||
u.error = errors[i]
|
||||
u.collectedAt = collectedAt[i]
|
||||
|
||||
select {
|
||||
case b.updateCh <- u:
|
||||
// Successfully queued
|
||||
default:
|
||||
// Channel is full, drop this update
|
||||
droppedCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Log dropped keys if any were dropped.
|
||||
if droppedCount > 0 {
|
||||
msg := "metadata channel at capacity, dropped updates"
|
||||
fields := []slog.Field{
|
||||
slog.F("agent_id", agentID),
|
||||
slog.F("channel_size", cap(b.updateCh)),
|
||||
slog.F("dropped_count", droppedCount),
|
||||
}
|
||||
select {
|
||||
case <-b.warnTicker.C:
|
||||
b.log.Warn(context.Background(), msg, fields...)
|
||||
default:
|
||||
b.log.Debug(context.Background(), msg, fields...)
|
||||
}
|
||||
|
||||
b.Metrics.DroppedKeysTotal.Add(float64(droppedCount))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processUpdate adds a metadata update to the batch with deduplication based on timestamp.
|
||||
func (b *Batcher) processUpdate(update update) {
|
||||
ck := compositeKey{
|
||||
agentID: update.agentID,
|
||||
key: update.key,
|
||||
}
|
||||
|
||||
// Check if key already exists and only update if new value is newer.
|
||||
existing, exists := b.batch[ck]
|
||||
if exists && update.collectedAt.Before(existing.collectedAt) {
|
||||
return
|
||||
}
|
||||
|
||||
b.batch[ck] = value{
|
||||
v: update.v,
|
||||
error: update.error,
|
||||
collectedAt: update.collectedAt,
|
||||
}
|
||||
if !exists {
|
||||
b.currentBatchLen.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
// run runs the batcher loop, reading from the update channel and flushing
// periodically or when the batch reaches capacity. It is the only goroutine
// that touches b.batch, so no locking is needed. On context cancellation it
// performs one final best-effort flush before returning.
func (b *Batcher) run(ctx context.Context) {
	// nolint:gocritic // This is only ever used for one thing - updating agent metadata.
	authCtx := dbauthz.AsSystemRestricted(ctx)
	for {
		select {
		case update := <-b.updateCh:
			b.processUpdate(update)

			// Flush early if the batch has reached capacity.
			if int(b.currentBatchLen.Load()) >= b.maxBatchSize {
				b.flush(authCtx, flushCapacity)
				// Reset timer so the next scheduled flush is interval duration
				// from now, not from when it was originally scheduled.
				b.timer.Reset(b.interval, "metadataBatcher", "capacityFlush")
			}

		case <-b.timer.C:
			b.flush(authCtx, flushTicker)
			// Reset timer to schedule the next flush.
			b.timer.Reset(b.interval, "metadataBatcher", "scheduledFlush")

		case <-ctx.Done():
			b.log.Debug(ctx, "context done, flushing before exit")

			// We must create a new context here as the parent context is done.
			ctxTimeout, cancel := context.WithTimeout(context.Background(), finalFlushTimeout)
			defer cancel() //nolint:revive // We're returning, defer is fine.

			// nolint:gocritic // This is only ever used for one thing - updating agent metadata.
			b.flush(dbauthz.AsSystemRestricted(ctxTimeout), flushExit)
			return
		}
	}
}
|
||||
|
||||
// flush flushes the current batch to the database and pubsub.
|
||||
func (b *Batcher) flush(ctx context.Context, reason string) {
|
||||
count := len(b.batch)
|
||||
|
||||
if count == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
start := b.clock.Now()
|
||||
b.log.Debug(ctx, "flushing metadata batch",
|
||||
slog.F("reason", reason),
|
||||
slog.F("count", count),
|
||||
)
|
||||
|
||||
// Convert batch map to parallel arrays for the batch query.
|
||||
// Also build map of agent IDs for per-agent metrics and pubsub.
|
||||
var (
|
||||
agentIDs = make([]uuid.UUID, 0, count)
|
||||
keys = make([]string, 0, count)
|
||||
values = make([]string, 0, count)
|
||||
errors = make([]string, 0, count)
|
||||
collectedAt = make([]time.Time, 0, count)
|
||||
agentKeys = make(map[uuid.UUID]int) // Track keys per agent for metrics
|
||||
)
|
||||
|
||||
for ck, mv := range b.batch {
|
||||
agentIDs = append(agentIDs, ck.agentID)
|
||||
keys = append(keys, ck.key)
|
||||
values = append(values, mv.v)
|
||||
errors = append(errors, mv.error)
|
||||
collectedAt = append(collectedAt, mv.collectedAt)
|
||||
agentKeys[ck.agentID]++
|
||||
}
|
||||
|
||||
// Batch has been processed into slices for our DB request, so we can clear it.
|
||||
// It's safe to clear before we know whether the flush is successful as agent metadata is not critical, and therefore
|
||||
// we do not retry failed flushes and losing a batch of metadata is okay.
|
||||
b.batch = make(map[compositeKey]value)
|
||||
b.currentBatchLen.Store(0)
|
||||
|
||||
// Record per-agent utilization metrics.
|
||||
for _, keyCount := range agentKeys {
|
||||
b.Metrics.BatchUtilization.Observe(float64(keyCount))
|
||||
}
|
||||
|
||||
// Update the database with all metadata updates in a single query.
|
||||
err := b.store.BatchUpdateWorkspaceAgentMetadata(ctx, database.BatchUpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: agentIDs,
|
||||
Key: keys,
|
||||
Value: values,
|
||||
Error: errors,
|
||||
CollectedAt: collectedAt,
|
||||
})
|
||||
elapsed := b.clock.Since(start)
|
||||
|
||||
if err != nil {
|
||||
if database.IsQueryCanceledError(err) {
|
||||
b.log.Debug(ctx, "query canceled, skipping update of workspace agent metadata", slog.F("elapsed", elapsed))
|
||||
return
|
||||
}
|
||||
b.log.Error(ctx, "error updating workspace agent metadata", slog.Error(err), slog.F("elapsed", elapsed))
|
||||
return
|
||||
}
|
||||
|
||||
// Build list of unique agent IDs for pubsub notification.
|
||||
uniqueAgentIDs := make([]uuid.UUID, 0, len(agentKeys))
|
||||
for agentID := range agentKeys {
|
||||
uniqueAgentIDs = append(uniqueAgentIDs, agentID)
|
||||
}
|
||||
|
||||
// Encode agent IDs into chunks and publish them.
|
||||
chunks, err := EncodeAgentIDChunks(uniqueAgentIDs)
|
||||
if err != nil {
|
||||
b.log.Error(ctx, "Agent ID chunk encoding for pubsub failed",
|
||||
slog.Error(err))
|
||||
}
|
||||
for _, chunk := range chunks {
|
||||
if err := b.ps.Publish(MetadataBatchPubsubChannel, chunk); err != nil {
|
||||
b.log.Error(ctx, "failed to publish workspace agent metadata batch",
|
||||
slog.Error(err),
|
||||
slog.F("chunk_size", len(chunk)/UUIDBase64Size),
|
||||
slog.F("payload_size", len(chunk)),
|
||||
)
|
||||
b.Metrics.PublishErrors.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// Record successful batch size and flush duration after successful send/publish.
|
||||
b.Metrics.BatchSize.Observe(float64(count))
|
||||
b.Metrics.MetadataTotal.Add(float64(count))
|
||||
b.Metrics.BatchesTotal.WithLabelValues(reason).Inc()
|
||||
b.Metrics.FlushDuration.WithLabelValues(reason).Observe(time.Since(start).Seconds())
|
||||
|
||||
elapsed = time.Since(start)
|
||||
b.log.Debug(ctx, "flush complete",
|
||||
slog.F("count", count),
|
||||
slog.F("elapsed", elapsed),
|
||||
slog.F("reason", reason),
|
||||
)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,95 @@
|
||||
package metadatabatcher
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// Metrics collects the Prometheus metrics emitted by the Batcher.
type Metrics struct {
	// BatchUtilization observes the number of metadata keys per agent in each batch.
	BatchUtilization prometheus.Histogram
	// FlushDuration observes flush latency, labeled by flush reason.
	FlushDuration *prometheus.HistogramVec
	// BatchSize observes the total number of entries in each flushed batch.
	BatchSize prometheus.Histogram
	// BatchesTotal counts flushes, labeled by flush reason.
	BatchesTotal *prometheus.CounterVec
	// DroppedKeysTotal counts keys dropped because the update channel was full.
	DroppedKeysTotal prometheus.Counter
	// MetadataTotal counts unique metadata entries flushed.
	MetadataTotal prometheus.Counter
	// PublishErrors counts pubsub publish calls that returned an error.
	PublishErrors prometheus.Counter
}
|
||||
|
||||
// NewMetrics constructs the full set of batcher metrics, unregistered.
// Call register (or expose Collectors) to attach them to a registry.
func NewMetrics() Metrics {
	return Metrics{
		BatchUtilization: prometheus.NewHistogram(prometheus.HistogramOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_batch_utilization",
			Help:      "Number of metadata keys per agent in each batch, updated before flushes.",
			Buckets:   []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 40, 80, 160},
		}),

		BatchSize: prometheus.NewHistogram(prometheus.HistogramOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_batch_size",
			Help:      "Total number of metadata entries in each batch, updated before flushes.",
			Buckets:   []float64{10, 25, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500},
		}),

		FlushDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_flush_duration_seconds",
			Help:      "Time taken to flush metadata batch to database and pubsub.",
			Buckets:   []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0},
		}, []string{"reason"}),

		BatchesTotal: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_batches_total",
			Help:      "Total number of metadata batches flushed.",
		}, []string{"reason"}),

		DroppedKeysTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_dropped_keys_total",
			Help:      "Total number of metadata keys dropped due to capacity limits.",
		}),

		MetadataTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_flushed_total",
			Help:      "Total number of unique metadatas flushed.",
		}),

		PublishErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "coderd",
			Subsystem: "agentapi",
			Name:      "metadata_publish_errors_total",
			Help:      "Total number of metadata batch pubsub publish calls that have resulted in an error.",
		}),
	}
}
|
||||
|
||||
// Collectors returns every metric in m as a prometheus.Collector slice,
// e.g. for registration on a custom registry.
func (m Metrics) Collectors() []prometheus.Collector {
	return []prometheus.Collector{
		m.BatchUtilization,
		m.BatchSize,
		m.FlushDuration,
		m.BatchesTotal,
		m.DroppedKeysTotal,
		m.MetadataTotal,
		m.PublishErrors,
	}
}
|
||||
|
||||
func (m Metrics) register(reg prometheus.Registerer) {
|
||||
if reg != nil {
|
||||
reg.MustRegister(m.BatchUtilization)
|
||||
reg.MustRegister(m.BatchSize)
|
||||
reg.MustRegister(m.FlushDuration)
|
||||
reg.MustRegister(m.DroppedKeysTotal)
|
||||
reg.MustRegister(m.BatchesTotal)
|
||||
reg.MustRegister(m.MetadataTotal)
|
||||
reg.MustRegister(m.PublishErrors)
|
||||
}
|
||||
}
|
||||
+308
-31
@@ -3,6 +3,7 @@ package coderd
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
@@ -12,10 +13,12 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
aiagentapi "github.com/coder/agentapi-sdk-go"
|
||||
"cdr.dev/slog/v3"
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
@@ -740,7 +743,7 @@ func (api *API) taskSend(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := api.authAndDoWithTaskAppClient(r, task, func(ctx context.Context, client *http.Client, appURL *url.URL) error {
|
||||
agentAPIClient, err := aiagentapi.NewClient(appURL.String(), aiagentapi.WithHTTPClient(client))
|
||||
agentAPIClient, err := agentapisdk.NewClient(appURL.String(), agentapisdk.WithHTTPClient(client))
|
||||
if err != nil {
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
Message: "Failed to create agentapi client.",
|
||||
@@ -756,16 +759,16 @@ func (api *API) taskSend(rw http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
}
|
||||
|
||||
if statusResp.Status != aiagentapi.StatusStable {
|
||||
if statusResp.Status != agentapisdk.StatusStable {
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
Message: "Task app is not ready to accept input.",
|
||||
Detail: fmt.Sprintf("Status: %s", statusResp.Status),
|
||||
})
|
||||
}
|
||||
|
||||
_, err = agentAPIClient.PostMessage(ctx, aiagentapi.PostMessageParams{
|
||||
_, err = agentAPIClient.PostMessage(ctx, agentapisdk.PostMessageParams{
|
||||
Content: req.Input,
|
||||
Type: aiagentapi.MessageTypeUser,
|
||||
Type: agentapisdk.MessageTypeUser,
|
||||
})
|
||||
if err != nil {
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
@@ -783,6 +786,30 @@ func (api *API) taskSend(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// convertAgentAPIMessagesToLogEntries converts AgentAPI messages to
|
||||
// TaskLogEntry format.
|
||||
func convertAgentAPIMessagesToLogEntries(messages []agentapisdk.Message) ([]codersdk.TaskLogEntry, error) {
|
||||
logs := make([]codersdk.TaskLogEntry, 0, len(messages))
|
||||
for _, m := range messages {
|
||||
var typ codersdk.TaskLogType
|
||||
switch m.Role {
|
||||
case agentapisdk.RoleUser:
|
||||
typ = codersdk.TaskLogTypeInput
|
||||
case agentapisdk.RoleAgent:
|
||||
typ = codersdk.TaskLogTypeOutput
|
||||
default:
|
||||
return nil, xerrors.Errorf("invalid agentapi message role %q", m.Role)
|
||||
}
|
||||
logs = append(logs, codersdk.TaskLogEntry{
|
||||
ID: int(m.Id),
|
||||
Content: m.Content,
|
||||
Type: typ,
|
||||
Time: m.Time,
|
||||
})
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// @Summary Get AI task logs
|
||||
// @ID get-ai-task-logs
|
||||
// @Security CoderSessionToken
|
||||
@@ -796,9 +823,43 @@ func (api *API) taskLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
task := httpmw.TaskParam(r)
|
||||
|
||||
switch task.Status {
|
||||
case database.TaskStatusActive:
|
||||
// Active tasks: fetch live logs from AgentAPI.
|
||||
out, err := api.fetchLiveTaskLogs(r, task)
|
||||
if err != nil {
|
||||
httperror.WriteResponseError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, out)
|
||||
|
||||
case database.TaskStatusPaused, database.TaskStatusPending, database.TaskStatusInitializing:
|
||||
// In pause, pending and initializing states, we attempt to fetch
|
||||
// the snapshot from database to provide continuity.
|
||||
out, err := api.fetchSnapshotTaskLogs(ctx, task.ID)
|
||||
if err != nil {
|
||||
httperror.WriteResponseError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, out)
|
||||
|
||||
default:
|
||||
// Cases: database.TaskStatusError, database.TaskStatusUnknown.
|
||||
// - Error: snapshot would be stale from previous pause.
|
||||
// - Unknown: cannot determine reliable state.
|
||||
httpapi.Write(ctx, rw, http.StatusConflict, codersdk.Response{
|
||||
Message: "Cannot fetch logs for task in current state.",
|
||||
Detail: fmt.Sprintf("Task status is %q.", task.Status),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (api *API) fetchLiveTaskLogs(r *http.Request, task database.Task) (codersdk.TaskLogsResponse, error) {
|
||||
var out codersdk.TaskLogsResponse
|
||||
if err := api.authAndDoWithTaskAppClient(r, task, func(ctx context.Context, client *http.Client, appURL *url.URL) error {
|
||||
agentAPIClient, err := aiagentapi.NewClient(appURL.String(), aiagentapi.WithHTTPClient(client))
|
||||
err := api.authAndDoWithTaskAppClient(r, task, func(ctx context.Context, client *http.Client, appURL *url.URL) error {
|
||||
agentAPIClient, err := agentapisdk.NewClient(appURL.String(), agentapisdk.WithHTTPClient(client))
|
||||
if err != nil {
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
Message: "Failed to create agentapi client.",
|
||||
@@ -814,35 +875,89 @@ func (api *API) taskLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
}
|
||||
|
||||
logs := make([]codersdk.TaskLogEntry, 0, len(messagesResp.Messages))
|
||||
for _, m := range messagesResp.Messages {
|
||||
var typ codersdk.TaskLogType
|
||||
switch m.Role {
|
||||
case aiagentapi.RoleUser:
|
||||
typ = codersdk.TaskLogTypeInput
|
||||
case aiagentapi.RoleAgent:
|
||||
typ = codersdk.TaskLogTypeOutput
|
||||
default:
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
Message: "Invalid task app response message role.",
|
||||
Detail: fmt.Sprintf(`Expected "user" or "agent", got %q.`, m.Role),
|
||||
})
|
||||
}
|
||||
logs = append(logs, codersdk.TaskLogEntry{
|
||||
ID: int(m.Id),
|
||||
Content: m.Content,
|
||||
Type: typ,
|
||||
Time: m.Time,
|
||||
logs, err := convertAgentAPIMessagesToLogEntries(messagesResp.Messages)
|
||||
if err != nil {
|
||||
return httperror.NewResponseError(http.StatusBadGateway, codersdk.Response{
|
||||
Message: "Invalid task app response.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
out = codersdk.TaskLogsResponse{Logs: logs}
|
||||
|
||||
out = codersdk.TaskLogsResponse{
|
||||
Logs: logs,
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
httperror.WriteResponseError(ctx, rw, err)
|
||||
return
|
||||
})
|
||||
return out, err
|
||||
}
|
||||
|
||||
func (api *API) fetchSnapshotTaskLogs(ctx context.Context, taskID uuid.UUID) (codersdk.TaskLogsResponse, error) {
|
||||
snapshot, err := api.Database.GetTaskSnapshot(ctx, taskID)
|
||||
if err != nil {
|
||||
if httpapi.IsUnauthorizedError(err) {
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusNotFound, codersdk.Response{
|
||||
Message: "Resource not found.",
|
||||
})
|
||||
}
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
// No snapshot exists yet, return empty logs. Snapshot is true
|
||||
// because this field indicates whether the data is from the
|
||||
// live task app (false) or not (true). Since the task is
|
||||
// paused/initializing/pending, we cannot fetch live logs, so
|
||||
// snapshot must be true even with no snapshot data.
|
||||
return codersdk.TaskLogsResponse{
|
||||
Logs: []codersdk.TaskLogEntry{},
|
||||
Snapshot: true,
|
||||
}, nil
|
||||
}
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task snapshot.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, out)
|
||||
// Unmarshal envelope with pre-populated data field to decode once.
|
||||
envelope := TaskLogSnapshotEnvelope{
|
||||
Data: &agentapisdk.GetMessagesResponse{},
|
||||
}
|
||||
if err := json.Unmarshal(snapshot.LogSnapshot, &envelope); err != nil {
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error decoding task snapshot.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Validate snapshot format.
|
||||
if envelope.Format != "agentapi" {
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Unsupported task snapshot format.",
|
||||
Detail: fmt.Sprintf("Expected format %q, got %q.", "agentapi", envelope.Format),
|
||||
})
|
||||
}
|
||||
|
||||
// Extract agentapi data from envelope (already decoded into the correct type).
|
||||
messagesResp, ok := envelope.Data.(*agentapisdk.GetMessagesResponse)
|
||||
if !ok {
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error decoding snapshot data.",
|
||||
Detail: "Unexpected data type in envelope.",
|
||||
})
|
||||
}
|
||||
|
||||
// Convert agentapi messages to log entries.
|
||||
logs, err := convertAgentAPIMessagesToLogEntries(messagesResp.Messages)
|
||||
if err != nil {
|
||||
return codersdk.TaskLogsResponse{}, httperror.NewResponseError(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Invalid snapshot data.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return codersdk.TaskLogsResponse{
|
||||
Logs: logs,
|
||||
Snapshot: true,
|
||||
SnapshotAt: ptr.Ref(snapshot.LogSnapshotCreatedAt),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// authAndDoWithTaskAppClient centralizes the shared logic to:
|
||||
@@ -950,3 +1065,165 @@ func (api *API) authAndDoWithTaskAppClient(
|
||||
}
|
||||
return do(ctx, client, parsedURL)
|
||||
}
|
||||
|
||||
const (
|
||||
// taskSnapshotMaxSize is the maximum size for task log snapshots (64KB).
|
||||
// Protects against excessive memory usage and database payload sizes.
|
||||
taskSnapshotMaxSize = 64 * 1024
|
||||
)
|
||||
|
||||
// TaskLogSnapshotEnvelope wraps a task log snapshot with format metadata.
|
||||
type TaskLogSnapshotEnvelope struct {
|
||||
Format string `json:"format"`
|
||||
Data any `json:"data"`
|
||||
}
|
||||
|
||||
// @Summary Upload task log snapshot
|
||||
// @ID upload-task-log-snapshot
|
||||
// @Security CoderSessionToken
|
||||
// @Accept json
|
||||
// @Tags Tasks
|
||||
// @Param task path string true "Task ID" format(uuid)
|
||||
// @Param format query string true "Snapshot format" enums(agentapi)
|
||||
// @Param request body object true "Raw snapshot payload (structure depends on format parameter)"
|
||||
// @Success 204
|
||||
// @Router /workspaceagents/me/tasks/{task}/log-snapshot [post]
|
||||
func (api *API) postWorkspaceAgentTaskLogSnapshot(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
latestBuild = httpmw.LatestBuild(r)
|
||||
)
|
||||
|
||||
// Parse task ID from path.
|
||||
taskIDStr := chi.URLParam(r, "task")
|
||||
taskID, err := uuid.Parse(taskIDStr)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid task ID format.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Validate format parameter (required).
|
||||
p := httpapi.NewQueryParamParser().RequiredNotEmpty("format")
|
||||
format := p.String(r.URL.Query(), "", "format")
|
||||
p.ErrorExcessParams(r.URL.Query())
|
||||
if len(p.Errors) > 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid query parameters.",
|
||||
Validations: p.Errors,
|
||||
})
|
||||
return
|
||||
}
|
||||
if format != "agentapi" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid format parameter.",
|
||||
Detail: fmt.Sprintf(`Only "agentapi" format is currently supported, got %q.`, format),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Verify task exists before reading the potentially large payload.
|
||||
// This prevents DoS attacks where attackers spam large payloads for
|
||||
// non-existent or deleted tasks, forcing us to read 64KB into memory
|
||||
// and do expensive JSON operations before the database rejects it.
|
||||
// The UpsertTaskSnapshot will re-fetch for RBAC validation, but this
|
||||
// early check protects against malicious load.
|
||||
task, err := api.Database.GetTaskByID(ctx, taskID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Reject deleted tasks early.
|
||||
if task.DeletedAt.Valid {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify task belongs to this agent's workspace.
|
||||
if !task.WorkspaceID.Valid || task.WorkspaceID.UUID != latestBuild.WorkspaceID {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
|
||||
// Limit payload size to avoid excessive memory or data usage.
|
||||
r.Body = http.MaxBytesReader(rw, r.Body, taskSnapshotMaxSize)
|
||||
|
||||
// Create envelope to store validated payload.
|
||||
envelope := TaskLogSnapshotEnvelope{
|
||||
Format: format,
|
||||
}
|
||||
|
||||
switch format {
|
||||
case "agentapi":
|
||||
var payload agentapisdk.GetMessagesResponse
|
||||
if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Failed to decode request payload.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Verify messages field exists (can be empty array).
|
||||
if payload.Messages == nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid agentapi payload structure.",
|
||||
Detail: `Missing required "messages" field.`,
|
||||
})
|
||||
return
|
||||
}
|
||||
envelope.Data = payload
|
||||
default:
|
||||
// Defensive branch, we already validated "agentapi" format but may add
|
||||
// more formats in the future.
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid format parameter.",
|
||||
Detail: fmt.Sprintf(`Only "agentapi" format is currently supported, got %q.`, format),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Marshal envelope with validated payload in a single pass.
|
||||
snapshotJSON, err := json.Marshal(envelope)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to create snapshot envelope.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Upsert to database using agent's RBAC context.
|
||||
err = api.Database.UpsertTaskSnapshot(ctx, database.UpsertTaskSnapshotParams{
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: dbtime.Time(api.Clock.Now()),
|
||||
})
|
||||
if err != nil {
|
||||
if httpapi.IsUnauthorizedError(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error storing snapshot.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
api.Logger.Debug(ctx, "stored task log snapshot",
|
||||
slog.F("task_id", task.ID),
|
||||
slog.F("workspace_id", latestBuild.WorkspaceID),
|
||||
slog.F("snapshot_size_bytes", len(snapshotJSON)))
|
||||
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
package coderd_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -17,6 +20,7 @@ import (
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/coderd"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
@@ -720,6 +724,266 @@ func TestTasks(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("LogsWithSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{})
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
|
||||
ownerUser, err := ownerClient.User(testutil.Context(t, testutil.WaitMedium), owner.UserID.String())
|
||||
require.NoError(t, err)
|
||||
ownerSubject := coderdtest.AuthzUserSubject(ownerUser)
|
||||
|
||||
// Create a regular user to test snapshot access.
|
||||
client, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
// Helper to create a task in the desired state.
|
||||
createTaskInState := func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}, nil)
|
||||
|
||||
switch status {
|
||||
case database.TaskStatusPending:
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
case database.TaskStatusError:
|
||||
// For error state, create a completed build then manipulate app health.
|
||||
default:
|
||||
require.Fail(t, "unsupported task status in test helper", "status: %s", status)
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
}
|
||||
|
||||
// Prepare snapshot data used across tests.
|
||||
snapshotMessages := []agentapisdk.Message{
|
||||
{
|
||||
Id: 0,
|
||||
Content: "First message",
|
||||
Role: agentapisdk.RoleAgent,
|
||||
Time: time.Date(2025, 1, 1, 10, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Id: 1,
|
||||
Content: "Second message",
|
||||
Role: agentapisdk.RoleUser,
|
||||
Time: time.Date(2025, 1, 1, 10, 1, 0, 0, time.UTC),
|
||||
},
|
||||
}
|
||||
|
||||
snapshotData := agentapisdk.GetMessagesResponse{
|
||||
Messages: snapshotMessages,
|
||||
}
|
||||
|
||||
envelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Format: "agentapi",
|
||||
Data: snapshotData,
|
||||
}
|
||||
|
||||
snapshotJSON, err := json.Marshal(envelope)
|
||||
require.NoError(t, err)
|
||||
|
||||
snapshotTime := time.Date(2025, 1, 1, 10, 5, 0, 0, time.UTC)
|
||||
|
||||
// Helper to verify snapshot logs content.
|
||||
verifySnapshotLogs := func(t *testing.T, got codersdk.TaskLogsResponse) {
|
||||
t.Helper()
|
||||
want := codersdk.TaskLogsResponse{
|
||||
Snapshot: true,
|
||||
SnapshotAt: &snapshotTime,
|
||||
Logs: []codersdk.TaskLogEntry{
|
||||
{
|
||||
ID: 0,
|
||||
Type: codersdk.TaskLogTypeOutput,
|
||||
Content: "First message",
|
||||
Time: snapshotMessages[0].Time,
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
Type: codersdk.TaskLogTypeInput,
|
||||
Content: "Second message",
|
||||
Time: snapshotMessages[1].Time,
|
||||
},
|
||||
},
|
||||
}
|
||||
if diff := cmp.Diff(want, got); diff != "" {
|
||||
t.Errorf("got bad response (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("PendingTaskReturnsSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
|
||||
t.Run("InitializingTaskReturnsSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
|
||||
t.Run("PausedTaskReturnsSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err, "upserting task snapshot")
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err, "fetching task logs")
|
||||
verifySnapshotLogs(t, logsResp)
|
||||
})
|
||||
|
||||
t.Run("NoSnapshotReturnsEmpty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, logsResp.Snapshot)
|
||||
assert.Nil(t, logsResp.SnapshotAt)
|
||||
assert.Len(t, logsResp.Logs, 0)
|
||||
})
|
||||
|
||||
t.Run("InvalidSnapshotFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
|
||||
invalidEnvelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Format: "unknown-format",
|
||||
Data: map[string]any{},
|
||||
}
|
||||
invalidJSON, err := json.Marshal(invalidEnvelope)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
LogSnapshot: json.RawMessage(invalidJSON),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = client.TaskLogs(ctx, "me", taskID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
assert.Equal(t, http.StatusInternalServerError, sdkErr.StatusCode())
|
||||
assert.Contains(t, sdkErr.Message, "Unsupported task snapshot format")
|
||||
})
|
||||
|
||||
t.Run("MalformedSnapshotData", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
LogSnapshot: json.RawMessage(`{"format":"agentapi","data":"not an object"}`),
|
||||
LogSnapshotCreatedAt: snapshotTime,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = client.TaskLogs(ctx, "me", taskID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
assert.Equal(t, http.StatusInternalServerError, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("ErrorStateReturnsError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusError)
|
||||
|
||||
_, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.Error(t, err)
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
assert.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
assert.Contains(t, sdkErr.Message, "Cannot fetch logs for task in current state")
|
||||
assert.Contains(t, sdkErr.Detail, "error")
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("UpdateInput", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1657,3 +1921,271 @@ func TestTasksNotification(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostWorkspaceAgentTaskSnapshot(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Shared coderd with mock clock for all tests.
|
||||
clock := quartz.NewMock(t)
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
Clock: clock,
|
||||
})
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
|
||||
createTaskWorkspace := func(t *testing.T, agentToken string) (taskID uuid.UUID, workspaceID uuid.UUID) {
|
||||
t.Helper()
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: owner.UserID,
|
||||
}).WithTask(database.TaskTable{
|
||||
Prompt: "test prompt",
|
||||
}, &proto.App{
|
||||
Slug: "task-app",
|
||||
Url: "http://localhost:8080",
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_Token{Token: agentToken}
|
||||
return agents
|
||||
}).Do()
|
||||
return workspaceBuild.Task.ID, workspaceBuild.Workspace.ID
|
||||
}
|
||||
|
||||
makePayload := func(t *testing.T, content string) []byte {
|
||||
t.Helper()
|
||||
data := agentapisdk.GetMessagesResponse{
|
||||
Messages: []agentapisdk.Message{
|
||||
{Id: 0, Role: "agent", Content: content, Time: time.Now()},
|
||||
},
|
||||
}
|
||||
b, err := json.Marshal(data)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}
|
||||
|
||||
makeRequest := func(t *testing.T, taskID uuid.UUID, agentToken string, payload []byte, format string) *http.Response {
|
||||
t.Helper()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
url := ownerClient.URL.JoinPath("/api/v2/workspaceagents/me/tasks", taskID.String(), "log-snapshot").String()
|
||||
if format != "" {
|
||||
url += "?format=" + format
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set(codersdk.SessionTokenHeader, agentToken)
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
return res
|
||||
}
|
||||
|
||||
unmarshalSnapshot := func(t *testing.T, snapshotJSON json.RawMessage) agentapisdk.GetMessagesResponse {
|
||||
t.Helper()
|
||||
// Pre-populate Data with the correct type so json.Unmarshal decodes
|
||||
// directly into it instead of creating a map[string]any.
|
||||
envelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Data: &agentapisdk.GetMessagesResponse{},
|
||||
}
|
||||
err := json.Unmarshal(snapshotJSON, &envelope)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "agentapi", envelope.Format)
|
||||
|
||||
return *envelope.Data.(*agentapisdk.GetMessagesResponse)
|
||||
}
|
||||
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, makePayload(t, "test"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNoContent, res.StatusCode)
|
||||
|
||||
snapshot, err := db.GetTaskSnapshot(dbauthz.AsSystemRestricted(ctx), taskID)
|
||||
require.NoError(t, err)
|
||||
|
||||
data := unmarshalSnapshot(t, snapshot.LogSnapshot)
|
||||
require.Len(t, data.Messages, 1)
|
||||
require.Equal(t, "test", data.Messages[0].Content)
|
||||
})
|
||||
|
||||
//nolint:paralleltest // Not parallel, advances shared clock.
|
||||
t.Run("Overwrite", func(t *testing.T) {
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// First snapshot.
|
||||
res1 := makeRequest(t, taskID, agentToken, makePayload(t, "first"), "agentapi")
|
||||
res1.Body.Close()
|
||||
require.Equal(t, http.StatusNoContent, res1.StatusCode)
|
||||
|
||||
snapshot1, err := db.GetTaskSnapshot(dbauthz.AsSystemRestricted(ctx), taskID)
|
||||
require.NoError(t, err)
|
||||
firstTime := snapshot1.LogSnapshotCreatedAt
|
||||
|
||||
// Advance clock to ensure timestamp differs.
|
||||
clock.Advance(time.Second)
|
||||
|
||||
// Second snapshot.
|
||||
res2 := makeRequest(t, taskID, agentToken, makePayload(t, "second"), "agentapi")
|
||||
res2.Body.Close()
|
||||
require.Equal(t, http.StatusNoContent, res2.StatusCode)
|
||||
|
||||
snapshot2, err := db.GetTaskSnapshot(dbauthz.AsSystemRestricted(ctx), taskID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, snapshot2.LogSnapshotCreatedAt.After(firstTime))
|
||||
|
||||
// Verify data was overwritten.
|
||||
data := unmarshalSnapshot(t, snapshot2.LogSnapshot)
|
||||
require.Len(t, data.Messages, 1)
|
||||
require.Equal(t, "second", data.Messages[0].Content)
|
||||
})
|
||||
|
||||
t.Run("MissingFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, makePayload(t, "test"), "")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
|
||||
var errResp codersdk.Response
|
||||
json.NewDecoder(res.Body).Decode(&errResp)
|
||||
require.Contains(t, errResp.Message, "Invalid query parameters")
|
||||
require.Len(t, errResp.Validations, 1)
|
||||
require.Equal(t, "format", errResp.Validations[0].Field)
|
||||
require.Contains(t, errResp.Validations[0].Detail, "required and cannot be empty")
|
||||
})
|
||||
|
||||
t.Run("InvalidFormat", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, makePayload(t, "test"), "unknown")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
|
||||
var errResp codersdk.Response
|
||||
json.NewDecoder(res.Body).Decode(&errResp)
|
||||
require.Contains(t, errResp.Message, "Invalid format parameter")
|
||||
})
|
||||
|
||||
t.Run("PayloadTooLarge", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
largeContent := strings.Repeat("x", 65*1024)
|
||||
payload := makePayload(t, largeContent)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, payload, "agentapi")
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
res.Body.Close()
|
||||
})
|
||||
|
||||
t.Run("InvalidTaskID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
createTaskWorkspace(t, agentToken)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
url := ownerClient.URL.JoinPath("/api/v2/workspaceagents/me/tasks", "not-a-uuid", "log-snapshot").String() + "?format=agentapi"
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(makePayload(t, "test")))
|
||||
req.Header.Set(codersdk.SessionTokenHeader, agentToken)
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
|
||||
var errResp codersdk.Response
|
||||
json.NewDecoder(res.Body).Decode(&errResp)
|
||||
require.Contains(t, errResp.Message, "Invalid task ID format")
|
||||
})
|
||||
|
||||
t.Run("TaskNotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
createTaskWorkspace(t, agentToken)
|
||||
|
||||
res := makeRequest(t, uuid.New(), agentToken, makePayload(t, "test"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("WrongWorkspace", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agent1Token := uuid.NewString()
|
||||
agent2Token := uuid.NewString()
|
||||
taskID1, _ := createTaskWorkspace(t, agent1Token)
|
||||
taskID2, _ := createTaskWorkspace(t, agent2Token)
|
||||
|
||||
// Try to POST snapshot for task2 using agent1's token.
|
||||
res := makeRequest(t, taskID2, agent1Token, makePayload(t, "test"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
|
||||
// Verify we CAN post for our own task.
|
||||
res2 := makeRequest(t, taskID1, agent1Token, makePayload(t, "test"), "agentapi")
|
||||
defer res2.Body.Close()
|
||||
require.Equal(t, http.StatusNoContent, res2.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("Unauthorized", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
res := makeRequest(t, taskID, "", makePayload(t, "test"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusUnauthorized, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("MalformedJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, []byte("{invalid json"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
|
||||
var errResp codersdk.Response
|
||||
json.NewDecoder(res.Body).Decode(&errResp)
|
||||
require.Contains(t, errResp.Message, "Failed to decode request payload")
|
||||
})
|
||||
|
||||
t.Run("InvalidAgentAPIPayload", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
|
||||
// Missing required "messages" field.
|
||||
res := makeRequest(t, taskID, agentToken, []byte(`{"truncated":false,"total_count":0}`), "agentapi")
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
|
||||
var errResp codersdk.Response
|
||||
json.NewDecoder(res.Body).Decode(&errResp)
|
||||
require.Contains(t, errResp.Message, "Invalid agentapi payload structure")
|
||||
})
|
||||
|
||||
t.Run("DeletedTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
agentToken := uuid.NewString()
|
||||
taskID, _ := createTaskWorkspace(t, agentToken)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Delete the task.
|
||||
err := ownerClient.DeleteTask(ctx, owner.UserID.String(), taskID)
|
||||
require.NoError(t, err)
|
||||
|
||||
res := makeRequest(t, taskID, agentToken, makePayload(t, "test"), "agentapi")
|
||||
defer res.Body.Close()
|
||||
// Agent token becomes invalid after task deletion.
|
||||
require.Equal(t, http.StatusUnauthorized, res.StatusCode)
|
||||
})
|
||||
}
|
||||
|
||||
Generated
+70
@@ -9556,6 +9556,57 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/me/tasks/{task}/log-snapshot": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Tasks"
|
||||
],
|
||||
"summary": "Upload task log snapshot",
|
||||
"operationId": "upload-task-log-snapshot",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"enum": [
|
||||
"agentapi"
|
||||
],
|
||||
"type": "string",
|
||||
"description": "Snapshot format",
|
||||
"name": "format",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Raw snapshot payload (structure depends on format parameter)",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/{workspaceagent}": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -12075,6 +12126,9 @@ const docTemplate = `{
|
||||
"retention": {
|
||||
"type": "integer"
|
||||
},
|
||||
"send_actor_headers": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"structured_logging": {
|
||||
"type": "boolean"
|
||||
}
|
||||
@@ -12413,6 +12467,10 @@ const docTemplate = `{
|
||||
"audit_log:*",
|
||||
"audit_log:create",
|
||||
"audit_log:read",
|
||||
"boundary_usage:*",
|
||||
"boundary_usage:delete",
|
||||
"boundary_usage:read",
|
||||
"boundary_usage:update",
|
||||
"coder:all",
|
||||
"coder:apikeys.manage_self",
|
||||
"coder:application_connect",
|
||||
@@ -12611,6 +12669,10 @@ const docTemplate = `{
|
||||
"APIKeyScopeAuditLogAll",
|
||||
"APIKeyScopeAuditLogCreate",
|
||||
"APIKeyScopeAuditLogRead",
|
||||
"APIKeyScopeBoundaryUsageAll",
|
||||
"APIKeyScopeBoundaryUsageDelete",
|
||||
"APIKeyScopeBoundaryUsageRead",
|
||||
"APIKeyScopeBoundaryUsageUpdate",
|
||||
"APIKeyScopeCoderAll",
|
||||
"APIKeyScopeCoderApikeysManageSelf",
|
||||
"APIKeyScopeCoderApplicationConnect",
|
||||
@@ -17686,6 +17748,7 @@ const docTemplate = `{
|
||||
"assign_org_role",
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"boundary_usage",
|
||||
"connection_log",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
@@ -17730,6 +17793,7 @@ const docTemplate = `{
|
||||
"ResourceAssignOrgRole",
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceBoundaryUsage",
|
||||
"ResourceConnectionLog",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
@@ -18503,6 +18567,12 @@ const docTemplate = `{
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.TaskLogEntry"
|
||||
}
|
||||
},
|
||||
"snapshot": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"snapshot_at": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
Generated
+64
@@ -8449,6 +8449,51 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/me/tasks/{task}/log-snapshot": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"tags": ["Tasks"],
|
||||
"summary": "Upload task log snapshot",
|
||||
"operationId": "upload-task-log-snapshot",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"enum": ["agentapi"],
|
||||
"type": "string",
|
||||
"description": "Snapshot format",
|
||||
"name": "format",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Raw snapshot payload (structure depends on format parameter)",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/{workspaceagent}": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -10727,6 +10772,9 @@
|
||||
"retention": {
|
||||
"type": "integer"
|
||||
},
|
||||
"send_actor_headers": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"structured_logging": {
|
||||
"type": "boolean"
|
||||
}
|
||||
@@ -11057,6 +11105,10 @@
|
||||
"audit_log:*",
|
||||
"audit_log:create",
|
||||
"audit_log:read",
|
||||
"boundary_usage:*",
|
||||
"boundary_usage:delete",
|
||||
"boundary_usage:read",
|
||||
"boundary_usage:update",
|
||||
"coder:all",
|
||||
"coder:apikeys.manage_self",
|
||||
"coder:application_connect",
|
||||
@@ -11255,6 +11307,10 @@
|
||||
"APIKeyScopeAuditLogAll",
|
||||
"APIKeyScopeAuditLogCreate",
|
||||
"APIKeyScopeAuditLogRead",
|
||||
"APIKeyScopeBoundaryUsageAll",
|
||||
"APIKeyScopeBoundaryUsageDelete",
|
||||
"APIKeyScopeBoundaryUsageRead",
|
||||
"APIKeyScopeBoundaryUsageUpdate",
|
||||
"APIKeyScopeCoderAll",
|
||||
"APIKeyScopeCoderApikeysManageSelf",
|
||||
"APIKeyScopeCoderApplicationConnect",
|
||||
@@ -16134,6 +16190,7 @@
|
||||
"assign_org_role",
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"boundary_usage",
|
||||
"connection_log",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
@@ -16178,6 +16235,7 @@
|
||||
"ResourceAssignOrgRole",
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceBoundaryUsage",
|
||||
"ResourceConnectionLog",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
@@ -16925,6 +16983,12 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.TaskLogEntry"
|
||||
}
|
||||
},
|
||||
"snapshot": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"snapshot_at": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
// Package boundaryusage tracks workspace boundary usage for telemetry reporting.
|
||||
// The design intent is to track trends and rough usage patterns.
|
||||
//
|
||||
// Each replica does in-memory usage tracking. Boundary usage is inferred at the
|
||||
// control plane when workspace agents call the ReportBoundaryLogs RPC. Accumulated
|
||||
// stats are periodically flushed to a database table keyed by replica ID. Telemetry
|
||||
// aggregates are computed across all replicas when generating snapshots.
|
||||
//
|
||||
// Aggregate Precision:
|
||||
//
|
||||
// The aggregated stats represent approximate usage over roughly the telemetry
|
||||
// snapshot interval, not a precise time window. This imprecision arises because:
|
||||
//
|
||||
// - Each replica flushes independently, so their data covers slightly different
|
||||
// time ranges (varying by up to the flush interval)
|
||||
// - Unflushed in-memory data at snapshot time rolls into the next period
|
||||
// - The snapshot captures "data flushed since last reset" rather than "usage
|
||||
// during exactly the last N minutes"
|
||||
//
|
||||
// We accept this imprecision to keep the architecture simple. Each replica
|
||||
// operates independently and flushes to the database on their own schedule.
|
||||
// This approach also minimizes database load. The table contains at most one
|
||||
// row per replica, so flushes are just upserts, and resets only delete N
|
||||
// rows. There's no accumulation of historical data to clean up. The only
|
||||
// synchronization is a database lock that ensures exactly one replica reports
|
||||
// telemetry per period.
|
||||
//
|
||||
// Known Shortcomings:
|
||||
//
|
||||
// - Unique workspace/user counts may be inflated when the same workspace or
|
||||
// user connects through multiple replicas, as each replica tracks its own
|
||||
// unique set
|
||||
// - Ad-hoc boundary usage in a workspace may not be accounted for e.g. if
|
||||
// the boundary command is invoked directly with the --log-proxy-socket-path
|
||||
// flag set to something other than the Workspace agent server.
|
||||
//
|
||||
// Implementation:
|
||||
//
|
||||
// The Tracker maintains sets of unique workspace IDs and user IDs, plus request
|
||||
// counters. When boundary logs are reported, Track() adds the IDs to the sets
|
||||
// and increments request counters.
|
||||
//
|
||||
// FlushToDB() writes stats to the database, replacing all values with the current
|
||||
// in-memory state. Stats accumulate in memory throughout the telemetry period.
|
||||
//
|
||||
// A new period is detected when the upsert results in an INSERT (meaning
|
||||
// telemetry deleted the replica's row). At that point, all in-memory stats are
|
||||
// reset so they only count usage within the new period.
|
||||
//
|
||||
// Below is a sequence diagram showing the flow of boundary usage tracking.
|
||||
//
|
||||
// ┌───────┐ ┌───────────────┐ ┌──────────┐ ┌────┐ ┌───────────┐
|
||||
// │ Agent │ │BoundaryLogsAPI│ │ Tracker │ │ DB │ │ Telemetry │
|
||||
// └───┬───┘ └───────┬───────┘ └────┬─────┘ └──┬─┘ └─────┬─────┘
|
||||
// │ │ │ │ │
|
||||
// │ ReportBoundaryLogs│ │ │ │
|
||||
// ├──────────────────►│ │ │ │
|
||||
// │ │ Track(...) │ │ │
|
||||
// │ ├────────────────►│ │ │
|
||||
// │ : │ │ │ │
|
||||
// │ : │ │ │ │
|
||||
// │ ReportBoundaryLogs│ │ │ │
|
||||
// ├──────────────────►│ │ │ │
|
||||
// │ │ Track(...) │ │ │
|
||||
// │ ├────────────────►│ │ │
|
||||
// │ │ │ │ │
|
||||
// │ │ │ FlushToDB │ │
|
||||
// │ │ ├────────────►│ │
|
||||
// │ │ │ : │ │
|
||||
// │ │ │ : │ │
|
||||
// │ │ │ FlushToDB │ │
|
||||
// │ │ ├────────────►│ │
|
||||
// │ │ │ │ │
|
||||
// │ │ │ │ Snapshot │
|
||||
// │ │ │ │ interval │
|
||||
// │ │ │ │◄───────────┤
|
||||
// │ │ │ │ Aggregate │
|
||||
// │ │ │ │ & Reset │
|
||||
package boundaryusage
|
||||
@@ -0,0 +1,105 @@
|
||||
package boundaryusage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
)
|
||||
|
||||
// Tracker tracks boundary usage for telemetry reporting.
|
||||
//
|
||||
// All stats accumulate in memory throughout a telemetry period and are only
|
||||
// reset when a new period begins.
|
||||
type Tracker struct {
|
||||
mu sync.Mutex
|
||||
workspaces map[uuid.UUID]struct{}
|
||||
users map[uuid.UUID]struct{}
|
||||
allowedRequests int64
|
||||
deniedRequests int64
|
||||
}
|
||||
|
||||
// NewTracker creates a new boundary usage tracker.
|
||||
func NewTracker() *Tracker {
|
||||
return &Tracker{
|
||||
workspaces: make(map[uuid.UUID]struct{}),
|
||||
users: make(map[uuid.UUID]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Track records boundary usage for a workspace.
|
||||
func (t *Tracker) Track(workspaceID, ownerID uuid.UUID, allowed, denied int64) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.workspaces[workspaceID] = struct{}{}
|
||||
t.users[ownerID] = struct{}{}
|
||||
t.allowedRequests += allowed
|
||||
t.deniedRequests += denied
|
||||
}
|
||||
|
||||
// FlushToDB writes the accumulated stats to the database. All values are
|
||||
// replaced in the database (they represent the current in-memory state). If the
|
||||
// database row was deleted (new telemetry period), all in-memory stats are reset.
|
||||
func (t *Tracker) FlushToDB(ctx context.Context, db database.Store, replicaID uuid.UUID) error {
|
||||
t.mu.Lock()
|
||||
workspaceCount := int64(len(t.workspaces))
|
||||
userCount := int64(len(t.users))
|
||||
allowed := t.allowedRequests
|
||||
denied := t.deniedRequests
|
||||
t.mu.Unlock()
|
||||
|
||||
// Don't flush if there's no activity.
|
||||
if workspaceCount == 0 && userCount == 0 && allowed == 0 && denied == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
//nolint:gocritic // This is the actual package doing boundary usage tracking.
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount,
|
||||
UniqueUsersCount: userCount,
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If this was an insert (new period), reset all stats. Any Track() calls
|
||||
// that occurred during the DB operation will be counted in the next period.
|
||||
if newPeriod {
|
||||
t.mu.Lock()
|
||||
t.workspaces = make(map[uuid.UUID]struct{})
|
||||
t.users = make(map[uuid.UUID]struct{})
|
||||
t.allowedRequests = 0
|
||||
t.deniedRequests = 0
|
||||
t.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartFlushLoop begins the periodic flush loop that writes accumulated stats
|
||||
// to the database. It blocks until the context is canceled. Flushes every
|
||||
// minute to keep stats reasonably fresh for telemetry collection (which runs
|
||||
// every 30 minutes by default) without excessive DB writes.
|
||||
func (t *Tracker) StartFlushLoop(ctx context.Context, log slog.Logger, db database.Store, replicaID uuid.UUID) {
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := t.FlushToDB(ctx, db, replicaID); err != nil {
|
||||
log.Warn(ctx, "failed to flush boundary usage stats", slog.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,542 @@
|
||||
package boundaryusage_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/boundaryusage"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
goleak.VerifyTestMain(m, testutil.GoleakOptions...)
|
||||
}
|
||||
|
||||
func TestTracker_New(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
require.NotNil(t, tracker)
|
||||
}
|
||||
|
||||
func TestTracker_Track_Single(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
replicaID := uuid.New()
|
||||
|
||||
tracker.Track(workspaceID, ownerID, 5, 2)
|
||||
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the data was written correctly.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
require.Equal(t, int64(5), summary.AllowedRequests)
|
||||
require.Equal(t, int64(2), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_Track_DuplicateWorkspaceUser(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
replicaID := uuid.New()
|
||||
|
||||
// Track same workspace/user multiple times.
|
||||
tracker.Track(workspaceID, ownerID, 3, 1)
|
||||
tracker.Track(workspaceID, ownerID, 4, 2)
|
||||
tracker.Track(workspaceID, ownerID, 2, 0)
|
||||
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should be 1 unique workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should be 1 unique user")
|
||||
require.Equal(t, int64(9), summary.AllowedRequests, "should accumulate: 3+4+2=9")
|
||||
require.Equal(t, int64(3), summary.DeniedRequests, "should accumulate: 1+2+0=3")
|
||||
}
|
||||
|
||||
func TestTracker_Track_MultipleWorkspacesUsers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
|
||||
// Track 3 different workspaces with 2 different users.
|
||||
workspace1, workspace2, workspace3 := uuid.New(), uuid.New(), uuid.New()
|
||||
user1, user2 := uuid.New(), uuid.New()
|
||||
|
||||
tracker.Track(workspace1, user1, 1, 0)
|
||||
tracker.Track(workspace2, user1, 2, 1)
|
||||
tracker.Track(workspace3, user2, 3, 2)
|
||||
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(3), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(2), summary.UniqueUsers)
|
||||
require.Equal(t, int64(6), summary.AllowedRequests)
|
||||
require.Equal(t, int64(3), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_Track_Concurrent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
|
||||
const numGoroutines = 100
|
||||
const requestsPerGoroutine = 10
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
for j := 0; j < requestsPerGoroutine; j++ {
|
||||
tracker.Track(workspaceID, ownerID, 1, 1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueUsers)
|
||||
require.Equal(t, int64(numGoroutines*requestsPerGoroutine), summary.AllowedRequests)
|
||||
require.Equal(t, int64(numGoroutines*requestsPerGoroutine), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_FlushToDB_Accumulates(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
tracker.Track(workspaceID, ownerID, 5, 3)
|
||||
|
||||
// First flush is an insert, which resets in-memory stats.
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track more data after the reset.
|
||||
tracker.Track(workspaceID, ownerID, 2, 1)
|
||||
|
||||
// Second flush is an update so stats should accumulate.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track even more data.
|
||||
tracker.Track(workspaceID, ownerID, 3, 2)
|
||||
|
||||
// Third flush stats should continue accumulating.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
require.Equal(t, int64(5), summary.AllowedRequests, "should accumulate after first reset: 2+3=5")
|
||||
require.Equal(t, int64(3), summary.DeniedRequests, "should accumulate after first reset: 1+2=3")
|
||||
}
|
||||
|
||||
func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
tracker.Track(workspaceID, ownerID, 10, 5)
|
||||
|
||||
// First flush.
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track new data.
|
||||
workspace2 := uuid.New()
|
||||
owner2 := uuid.New()
|
||||
tracker.Track(workspace2, owner2, 3, 1)
|
||||
|
||||
// Flushing again should detect new period and reset in-memory stats.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The summary should only contain the new data after reset.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should only count new workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should only count new user")
|
||||
require.Equal(t, int64(3), summary.AllowedRequests, "should only count new requests")
|
||||
require.Equal(t, int64(1), summary.DeniedRequests, "should only count new requests")
|
||||
}
|
||||
|
||||
func TestTracker_FlushToDB_NoActivity(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify nothing was written to DB.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestUpsertBoundaryUsageStats_Insert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replicaID := uuid.New()
|
||||
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 5,
|
||||
UniqueUsersCount: 3,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, newPeriod, "should return true for insert")
|
||||
}
|
||||
|
||||
func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replicaID := uuid.New()
|
||||
|
||||
// First insert.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 5,
|
||||
UniqueUsersCount: 3,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second upsert (update).
|
||||
newPeriod, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: 8,
|
||||
UniqueUsersCount: 5,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.False(t, newPeriod, "should return false for update")
|
||||
|
||||
// Verify the update took effect.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(8), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(5), summary.UniqueUsers)
|
||||
require.Equal(t, int64(200), summary.AllowedRequests)
|
||||
require.Equal(t, int64(20), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
replica3 := uuid.New()
|
||||
|
||||
// Insert stats for 3 replicas.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesCount: 10,
|
||||
UniqueUsersCount: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesCount: 15,
|
||||
UniqueUsersCount: 8,
|
||||
AllowedRequests: 150,
|
||||
DeniedRequests: 15,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica3,
|
||||
UniqueWorkspacesCount: 20,
|
||||
UniqueUsersCount: 12,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation (SUM of all replicas).
|
||||
require.Equal(t, int64(45), summary.UniqueWorkspaces) // 10 + 15 + 20
|
||||
require.Equal(t, int64(25), summary.UniqueUsers) // 5 + 8 + 12
|
||||
require.Equal(t, int64(450), summary.AllowedRequests) // 100 + 150 + 200
|
||||
require.Equal(t, int64(45), summary.DeniedRequests) // 10 + 15 + 20
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// COALESCE should return 0 for all columns.
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.UniqueUsers)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
require.Equal(t, int64(0), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
// Insert stats for multiple replicas.
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: uuid.New(),
|
||||
UniqueWorkspacesCount: int64(i + 1),
|
||||
UniqueUsersCount: int64(i + 1),
|
||||
AllowedRequests: int64((i + 1) * 10),
|
||||
DeniedRequests: int64(i + 1),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify data exists.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, summary.AllowedRequests, int64(0))
|
||||
|
||||
// Reset.
|
||||
err = db.ResetBoundaryUsageStats(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify all data is gone.
|
||||
summary, err = db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
|
||||
// Insert stats for 2 replicas.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesCount: 10,
|
||||
UniqueUsersCount: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesCount: 20,
|
||||
UniqueUsersCount: 10,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete replica1's stats.
|
||||
err = db.DeleteBoundaryUsageStatsByReplicaID(ctx, replica1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify only replica2's stats remain.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(20), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(200), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
|
||||
// Simulate 3 replicas.
|
||||
tracker1 := boundaryusage.NewTracker()
|
||||
tracker2 := boundaryusage.NewTracker()
|
||||
tracker3 := boundaryusage.NewTracker()
|
||||
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
replica3 := uuid.New()
|
||||
|
||||
// Each tracker records different workspaces/users.
|
||||
tracker1.Track(uuid.New(), uuid.New(), 10, 1)
|
||||
tracker1.Track(uuid.New(), uuid.New(), 15, 2)
|
||||
|
||||
tracker2.Track(uuid.New(), uuid.New(), 20, 3)
|
||||
tracker2.Track(uuid.New(), uuid.New(), 25, 4)
|
||||
tracker2.Track(uuid.New(), uuid.New(), 30, 5)
|
||||
|
||||
tracker3.Track(uuid.New(), uuid.New(), 5, 0)
|
||||
|
||||
// All replicas flush to database.
|
||||
require.NoError(t, tracker1.FlushToDB(ctx, db, replica1))
|
||||
require.NoError(t, tracker2.FlushToDB(ctx, db, replica2))
|
||||
require.NoError(t, tracker3.FlushToDB(ctx, db, replica3))
|
||||
|
||||
// Telemetry aggregates.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation.
|
||||
require.Equal(t, int64(6), summary.UniqueWorkspaces) // 2 + 3 + 1
|
||||
require.Equal(t, int64(6), summary.UniqueUsers) // 2 + 3 + 1
|
||||
require.Equal(t, int64(105), summary.AllowedRequests) // 25 + 75 + 5
|
||||
require.Equal(t, int64(15), summary.DeniedRequests) // 3 + 12 + 0
|
||||
|
||||
// Telemetry resets stats (simulating telemetry report sent).
|
||||
require.NoError(t, db.ResetBoundaryUsageStats(boundaryCtx))
|
||||
|
||||
// Next flush from trackers should detect new period.
|
||||
tracker1.Track(uuid.New(), uuid.New(), 1, 0)
|
||||
require.NoError(t, tracker1.FlushToDB(ctx, db, replica1))
|
||||
|
||||
// Verify trackers reset their in-memory state.
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
tracker := boundaryusage.NewTracker()
|
||||
replicaID := uuid.New()
|
||||
|
||||
const numOperations = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Goroutine 1: Continuously track.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < numOperations; i++ {
|
||||
tracker.Track(uuid.New(), uuid.New(), 1, 1)
|
||||
}
|
||||
}()
|
||||
|
||||
// Goroutine 2: Continuously flush.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < numOperations; i++ {
|
||||
_ = tracker.FlushToDB(ctx, db, replicaID)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Final flush to capture any remaining data.
|
||||
require.NoError(t, tracker.FlushToDB(ctx, db, replicaID))
|
||||
|
||||
// Verify stats are non-negative.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.GreaterOrEqual(t, summary.AllowedRequests, int64(0))
|
||||
require.GreaterOrEqual(t, summary.DeniedRequests, int64(0))
|
||||
}
|
||||
+31
-1
@@ -44,10 +44,12 @@ import (
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
_ "github.com/coder/coder/v2/coderd/apidoc" // Used for swagger docs.
|
||||
"github.com/coder/coder/v2/coderd/appearance"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/awsidentity"
|
||||
"github.com/coder/coder/v2/coderd/boundaryusage"
|
||||
"github.com/coder/coder/v2/coderd/connectionlog"
|
||||
"github.com/coder/coder/v2/coderd/cryptokeys"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
@@ -241,6 +243,8 @@ type Options struct {
|
||||
UpdateAgentMetrics func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric)
|
||||
StatsBatcher workspacestats.Batcher
|
||||
|
||||
MetadataBatcherOptions []metadatabatcher.Option
|
||||
|
||||
ProvisionerdServerMetrics *provisionerdserver.Metrics
|
||||
|
||||
// WorkspaceAppAuditSessionTimeout allows changing the timeout for audit
|
||||
@@ -263,6 +267,8 @@ type Options struct {
|
||||
DatabaseRolluper *dbrollup.Rolluper
|
||||
// WorkspaceUsageTracker tracks workspace usage by the CLI.
|
||||
WorkspaceUsageTracker *workspacestats.UsageTracker
|
||||
// BoundaryUsageTracker tracks boundary usage for telemetry.
|
||||
BoundaryUsageTracker *boundaryusage.Tracker
|
||||
// NotificationsEnqueuer handles enqueueing notifications for delivery by SMTP, webhook, etc.
|
||||
NotificationsEnqueuer notifications.Enqueuer
|
||||
|
||||
@@ -786,6 +792,23 @@ func New(options *Options) *API {
|
||||
AppStatBatchSize: workspaceapps.DefaultStatsDBReporterBatchSize,
|
||||
DisableDatabaseInserts: !options.DeploymentValues.StatsCollection.UsageStats.Enable.Value(),
|
||||
})
|
||||
|
||||
// Initialize the metadata batcher for batching agent metadata updates.
|
||||
batcherOpts := []metadatabatcher.Option{
|
||||
metadatabatcher.WithLogger(options.Logger.Named("metadata_batcher")),
|
||||
}
|
||||
batcherOpts = append(batcherOpts, options.MetadataBatcherOptions...)
|
||||
api.metadataBatcher, err = metadatabatcher.NewBatcher(
|
||||
api.ctx,
|
||||
options.PrometheusRegistry,
|
||||
options.Database,
|
||||
options.Pubsub,
|
||||
batcherOpts...,
|
||||
)
|
||||
if err != nil {
|
||||
api.Logger.Fatal(context.Background(), "failed to initialize metadata batcher", slog.Error(err))
|
||||
}
|
||||
|
||||
workspaceAppsLogger := options.Logger.Named("workspaceapps")
|
||||
if options.WorkspaceAppsStatsCollectorOptions.Logger == nil {
|
||||
named := workspaceAppsLogger.Named("stats_collector")
|
||||
@@ -1428,6 +1451,9 @@ func New(options *Options) *API {
|
||||
r.Get("/gitsshkey", api.agentGitSSHKey)
|
||||
r.Post("/log-source", api.workspaceAgentPostLogSource)
|
||||
r.Get("/reinit", api.workspaceAgentReinit)
|
||||
r.Route("/tasks/{task}", func(r chi.Router) {
|
||||
r.Post("/log-snapshot", api.postWorkspaceAgentTaskLogSnapshot)
|
||||
})
|
||||
})
|
||||
r.Route("/{workspaceagent}", func(r chi.Router) {
|
||||
r.Use(
|
||||
@@ -1865,7 +1891,8 @@ type API struct {
|
||||
healthCheckCache atomic.Pointer[healthsdk.HealthcheckReport]
|
||||
healthCheckProgress healthcheck.Progress
|
||||
|
||||
statsReporter *workspacestats.Reporter
|
||||
statsReporter *workspacestats.Reporter
|
||||
metadataBatcher *metadatabatcher.Batcher
|
||||
|
||||
Acquirer *provisionerdserver.Acquirer
|
||||
// dbRolluper rolls up template usage stats from raw agent and app
|
||||
@@ -1917,6 +1944,9 @@ func (api *API) Close() error {
|
||||
_ = (*coordinator).Close()
|
||||
}
|
||||
_ = api.statsReporter.Close()
|
||||
if api.metadataBatcher != nil {
|
||||
api.metadataBatcher.Close()
|
||||
}
|
||||
_ = api.NetworkTelemetryBatcher.Close()
|
||||
_ = api.OIDCConvertKeyCache.Close()
|
||||
_ = api.AppSigningKeyCache.Close()
|
||||
|
||||
@@ -55,6 +55,7 @@ import (
|
||||
"cdr.dev/slog/v3/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/archive"
|
||||
"github.com/coder/coder/v2/coderd"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/autobuild"
|
||||
"github.com/coder/coder/v2/coderd/awsidentity"
|
||||
@@ -82,6 +83,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/schedule"
|
||||
"github.com/coder/coder/v2/coderd/telemetry"
|
||||
"github.com/coder/coder/v2/coderd/updatecheck"
|
||||
"github.com/coder/coder/v2/coderd/usage"
|
||||
"github.com/coder/coder/v2/coderd/util/namesgenerator"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/coderd/webpush"
|
||||
@@ -171,8 +173,9 @@ type Options struct {
|
||||
SwaggerEndpoint bool
|
||||
// Logger should only be overridden if you expect errors
|
||||
// as part of your test.
|
||||
Logger *slog.Logger
|
||||
StatsBatcher workspacestats.Batcher
|
||||
Logger *slog.Logger
|
||||
StatsBatcher workspacestats.Batcher
|
||||
MetadataBatcherOptions []metadatabatcher.Option
|
||||
|
||||
WebpushDispatcher webpush.Dispatcher
|
||||
WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions
|
||||
@@ -188,6 +191,7 @@ type Options struct {
|
||||
TelemetryReporter telemetry.Reporter
|
||||
|
||||
ProvisionerdServerMetrics *provisionerdserver.Metrics
|
||||
UsageInserter usage.Inserter
|
||||
}
|
||||
|
||||
// New constructs a codersdk client connected to an in-memory API instance.
|
||||
@@ -268,6 +272,11 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
}
|
||||
}
|
||||
|
||||
var usageInserter *atomic.Pointer[usage.Inserter]
|
||||
if options.UsageInserter != nil {
|
||||
usageInserter = &atomic.Pointer[usage.Inserter]{}
|
||||
usageInserter.Store(&options.UsageInserter)
|
||||
}
|
||||
if options.Database == nil {
|
||||
options.Database, options.Pubsub = dbtestutil.NewDB(t)
|
||||
}
|
||||
@@ -561,6 +570,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
Database: options.Database,
|
||||
Pubsub: options.Pubsub,
|
||||
ExternalAuthConfigs: options.ExternalAuthConfigs,
|
||||
UsageInserter: usageInserter,
|
||||
|
||||
Auditor: options.Auditor,
|
||||
ConnectionLogger: options.ConnectionLogger,
|
||||
@@ -598,6 +608,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
HealthcheckTimeout: options.HealthcheckTimeout,
|
||||
HealthcheckRefresh: options.HealthcheckRefresh,
|
||||
StatsBatcher: options.StatsBatcher,
|
||||
MetadataBatcherOptions: options.MetadataBatcherOptions,
|
||||
WorkspaceAppsStatsCollectorOptions: options.WorkspaceAppsStatsCollectorOptions,
|
||||
AllowWorkspaceRenames: options.AllowWorkspaceRenames,
|
||||
NewTicker: options.NewTicker,
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
package coderdtest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/usage"
|
||||
"github.com/coder/coder/v2/coderd/usage/usagetypes"
|
||||
)
|
||||
|
||||
var _ usage.Inserter = (*UsageInserter)(nil)
|
||||
|
||||
type UsageInserter struct {
|
||||
sync.Mutex
|
||||
events []usagetypes.DiscreteEvent
|
||||
}
|
||||
|
||||
func NewUsageInserter() *UsageInserter {
|
||||
return &UsageInserter{
|
||||
events: []usagetypes.DiscreteEvent{},
|
||||
}
|
||||
}
|
||||
|
||||
func (u *UsageInserter) InsertDiscreteUsageEvent(_ context.Context, _ database.Store, event usagetypes.DiscreteEvent) error {
|
||||
u.Lock()
|
||||
defer u.Unlock()
|
||||
u.events = append(u.events, event)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *UsageInserter) GetEvents() []usagetypes.DiscreteEvent {
|
||||
u.Lock()
|
||||
defer u.Unlock()
|
||||
eventsCopy := make([]usagetypes.DiscreteEvent, len(u.events))
|
||||
copy(eventsCopy, u.events)
|
||||
return eventsCopy
|
||||
}
|
||||
|
||||
func (u *UsageInserter) Reset() {
|
||||
u.Lock()
|
||||
defer u.Unlock()
|
||||
u.events = []usagetypes.DiscreteEvent{}
|
||||
}
|
||||
@@ -31,23 +31,14 @@ import (
|
||||
previewtypes "github.com/coder/preview/types"
|
||||
)
|
||||
|
||||
// List is a helper function to reduce boilerplate when converting slices of
|
||||
// database types to slices of codersdk types.
|
||||
// Only works if the function takes a single argument.
|
||||
// Deprecated: use slice.List
|
||||
func List[F any, T any](list []F, convert func(F) T) []T {
|
||||
return ListLazy(convert)(list)
|
||||
return slice.List[F, T](list, convert)
|
||||
}
|
||||
|
||||
// ListLazy returns the converter function for a list, but does not eval
|
||||
// the input. Helpful for combining the Map and the List functions.
|
||||
// Deprecated: use slice.ListLazy
|
||||
func ListLazy[F any, T any](convert func(F) T) func(list []F) []T {
|
||||
return func(list []F) []T {
|
||||
into := make([]T, 0, len(list))
|
||||
for _, item := range list {
|
||||
into = append(into, convert(item))
|
||||
}
|
||||
return into
|
||||
}
|
||||
return slice.ListLazy[F, T](convert)
|
||||
}
|
||||
|
||||
func APIAllowListTarget(entry rbac.AllowListElement) codersdk.APIAllowListTarget {
|
||||
|
||||
@@ -174,6 +174,19 @@ func (q *querier) authorizePrebuiltWorkspace(ctx context.Context, action policy.
|
||||
return xerrors.Errorf("authorize context: %w", workspaceErr)
|
||||
}
|
||||
|
||||
func workspaceTransitionAction(transition database.WorkspaceTransition) (policy.Action, error) {
|
||||
switch transition {
|
||||
case database.WorkspaceTransitionStart:
|
||||
return policy.ActionWorkspaceStart, nil
|
||||
case database.WorkspaceTransitionStop:
|
||||
return policy.ActionWorkspaceStop, nil
|
||||
case database.WorkspaceTransitionDelete:
|
||||
return policy.ActionDelete, nil
|
||||
default:
|
||||
return "", xerrors.Errorf("unsupported workspace transition %q", transition)
|
||||
}
|
||||
}
|
||||
|
||||
// authorizeAIBridgeInterceptionAction validates that the context's actor matches the initiator of the AIBridgeInterception.
|
||||
// This is used by all of the sub-resources which fall under the [ResourceAibridgeInterception] umbrella.
|
||||
func (q *querier) authorizeAIBridgeInterceptionAction(ctx context.Context, action policy.Action, interceptionID uuid.UUID) error {
|
||||
@@ -636,6 +649,25 @@ var (
|
||||
}),
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
|
||||
// Used by the boundary usage tracker to record telemetry statistics.
|
||||
subjectBoundaryUsageTracker = rbac.Subject{
|
||||
Type: rbac.SubjectTypeBoundaryUsageTracker,
|
||||
FriendlyName: "Boundary Usage Tracker",
|
||||
ID: uuid.Nil.String(),
|
||||
Roles: rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleIdentifier{Name: "boundary-usage-tracker"},
|
||||
DisplayName: "Boundary Usage Tracker",
|
||||
Site: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceBoundaryUsage.Type: rbac.ResourceBoundaryUsage.AvailableActions(),
|
||||
}),
|
||||
User: []rbac.Permission{},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{},
|
||||
},
|
||||
}),
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
)
|
||||
|
||||
// AsProvisionerd returns a context with an actor that has permissions required
|
||||
@@ -736,6 +768,12 @@ func AsDBPurge(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectDBPurge)
|
||||
}
|
||||
|
||||
// AsBoundaryUsageTracker returns a context with an actor that has permissions
|
||||
// required for the boundary usage tracker to record telemetry statistics.
|
||||
func AsBoundaryUsageTracker(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectBoundaryUsageTracker)
|
||||
}
|
||||
|
||||
var AsRemoveActor = rbac.Subject{
|
||||
ID: "remove-actor",
|
||||
}
|
||||
@@ -1458,6 +1496,15 @@ func (q *querier) ArchiveUnusedTemplateVersions(ctx context.Context, arg databas
|
||||
return q.db.ArchiveUnusedTemplateVersions(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) BatchUpdateWorkspaceAgentMetadata(ctx context.Context, arg database.BatchUpdateWorkspaceAgentMetadataParams) error {
|
||||
// Could be any workspace agent and checking auth to each workspace agent is overkill for
|
||||
// the purpose of this function.
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.BatchUpdateWorkspaceAgentMetadata(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg database.BatchUpdateWorkspaceLastUsedAtParams) error {
|
||||
// Could be any workspace and checking auth to each workspace is overkill for
|
||||
// the purpose of this function.
|
||||
@@ -1632,13 +1679,6 @@ func (q *querier) DeleteAPIKeysByUserID(ctx context.Context, userID uuid.UUID) e
|
||||
return q.db.DeleteAPIKeysByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg database.DeleteAllTailnetClientSubscriptionsParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteAllTailnetClientSubscriptions(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteAllTailnetTunnels(ctx context.Context, arg database.DeleteAllTailnetTunnelsParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
@@ -1663,11 +1703,11 @@ func (q *querier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, u
|
||||
return q.db.DeleteApplicationConnectAPIKeysByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
func (q *querier) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteCoordinator(ctx, id)
|
||||
return q.db.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
@@ -1878,27 +1918,6 @@ func (q *querier) DeleteRuntimeConfig(ctx context.Context, key string) error {
|
||||
return q.db.DeleteRuntimeConfig(ctx, key)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTailnetAgent(ctx context.Context, arg database.DeleteTailnetAgentParams) (database.DeleteTailnetAgentRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.DeleteTailnetAgentRow{}, err
|
||||
}
|
||||
return q.db.DeleteTailnetAgent(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTailnetClient(ctx context.Context, arg database.DeleteTailnetClientParams) (database.DeleteTailnetClientRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.DeleteTailnetClientRow{}, err
|
||||
}
|
||||
return q.db.DeleteTailnetClient(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTailnetClientSubscription(ctx context.Context, arg database.DeleteTailnetClientSubscriptionParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteTailnetClientSubscription(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTailnetPeer(ctx context.Context, arg database.DeleteTailnetPeerParams) (database.DeleteTailnetPeerRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.DeleteTailnetPeerRow{}, err
|
||||
@@ -2183,13 +2202,6 @@ func (q *querier) GetActiveWorkspaceBuildsByTemplateID(ctx context.Context, temp
|
||||
return q.db.GetActiveWorkspaceBuildsByTemplateID(ctx, templateID)
|
||||
}
|
||||
|
||||
func (q *querier) GetAllTailnetAgents(ctx context.Context) ([]database.TailnetAgent, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return []database.TailnetAgent{}, err
|
||||
}
|
||||
return q.db.GetAllTailnetAgents(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetAllTailnetCoordinators(ctx context.Context) ([]database.TailnetCoordinator, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
@@ -2259,6 +2271,13 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
|
||||
return q.db.GetAuthorizationUserRoles(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return database.GetBoundaryUsageSummaryRow{}, err
|
||||
}
|
||||
return q.db.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
func (q *querier) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
// Just like with the audit logs query, shortcut if the user is an owner.
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog)
|
||||
@@ -3055,20 +3074,6 @@ func (q *querier) GetRuntimeConfig(ctx context.Context, key string) (string, err
|
||||
return q.db.GetRuntimeConfig(ctx, key)
|
||||
}
|
||||
|
||||
func (q *querier) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]database.TailnetAgent, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetTailnetAgents(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]database.TailnetClient, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetTailnetClientsForAgent(ctx, agentID)
|
||||
}
|
||||
|
||||
func (q *querier) GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]database.TailnetPeer, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
@@ -3102,6 +3107,25 @@ func (q *querier) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUI
|
||||
return fetch(q.log, q.auth, q.db.GetTaskByWorkspaceID)(ctx, workspaceID)
|
||||
}
|
||||
|
||||
func (q *querier) GetTaskSnapshot(ctx context.Context, taskID uuid.UUID) (database.TaskSnapshot, error) {
|
||||
// Fetch task to build RBAC object for authorization.
|
||||
task, err := q.GetTaskByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return database.TaskSnapshot{}, err
|
||||
}
|
||||
|
||||
obj := rbac.ResourceTask.
|
||||
WithID(task.ID).
|
||||
WithOwner(task.OwnerID.String()).
|
||||
InOrg(task.OrganizationID)
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, obj); err != nil {
|
||||
return database.TaskSnapshot{}, err
|
||||
}
|
||||
|
||||
return q.db.GetTaskSnapshot(ctx, taskID)
|
||||
}
|
||||
|
||||
func (q *querier) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return database.TelemetryItem{}, err
|
||||
@@ -4622,11 +4646,9 @@ func (q *querier) InsertWorkspaceBuild(ctx context.Context, arg database.InsertW
|
||||
return xerrors.Errorf("get workspace by id: %w", err)
|
||||
}
|
||||
|
||||
var action policy.Action = policy.ActionWorkspaceStart
|
||||
if arg.Transition == database.WorkspaceTransitionDelete {
|
||||
action = policy.ActionDelete
|
||||
} else if arg.Transition == database.WorkspaceTransitionStop {
|
||||
action = policy.ActionWorkspaceStop
|
||||
action, err := workspaceTransitionAction(arg.Transition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Special handling for prebuilt workspace deletion
|
||||
@@ -4670,8 +4692,13 @@ func (q *querier) InsertWorkspaceBuildParameters(ctx context.Context, arg databa
|
||||
return err
|
||||
}
|
||||
|
||||
action, err := workspaceTransitionAction(build.Transition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Special handling for prebuilt workspace deletion
|
||||
if err := q.authorizePrebuiltWorkspace(ctx, policy.ActionUpdate, workspace); err != nil {
|
||||
if err := q.authorizePrebuiltWorkspace(ctx, action, workspace); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -4864,6 +4891,13 @@ func (q *querier) RemoveUserFromGroups(ctx context.Context, arg database.RemoveU
|
||||
return q.db.RemoveUserFromGroups(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.ResetBoundaryUsageStats(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
@@ -5955,6 +5989,13 @@ func (q *querier) UpsertApplicationName(ctx context.Context, value string) error
|
||||
return q.db.UpsertApplicationName(ctx, value)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return q.db.UpsertBoundaryUsageStats(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil {
|
||||
return database.ConnectionLog{}, err
|
||||
@@ -6050,27 +6091,6 @@ func (q *querier) UpsertRuntimeConfig(ctx context.Context, arg database.UpsertRu
|
||||
return q.db.UpsertRuntimeConfig(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTailnetAgent(ctx context.Context, arg database.UpsertTailnetAgentParams) (database.TailnetAgent, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.TailnetAgent{}, err
|
||||
}
|
||||
return q.db.UpsertTailnetAgent(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTailnetClient(ctx context.Context, arg database.UpsertTailnetClientParams) (database.TailnetClient, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.TailnetClient{}, err
|
||||
}
|
||||
return q.db.UpsertTailnetClient(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTailnetClientSubscription(ctx context.Context, arg database.UpsertTailnetClientSubscriptionParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.UpsertTailnetClientSubscription(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (database.TailnetCoordinator, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return database.TailnetCoordinator{}, err
|
||||
@@ -6092,6 +6112,25 @@ func (q *querier) UpsertTailnetTunnel(ctx context.Context, arg database.UpsertTa
|
||||
return q.db.UpsertTailnetTunnel(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTaskSnapshot(ctx context.Context, arg database.UpsertTaskSnapshotParams) error {
|
||||
// Fetch task to build RBAC object for authorization.
|
||||
task, err := q.GetTaskByID(ctx, arg.TaskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
obj := rbac.ResourceTask.
|
||||
WithID(task.ID).
|
||||
WithOwner(task.OwnerID.String()).
|
||||
InOrg(task.OrganizationID)
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return q.db.UpsertTaskSnapshot(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertTaskWorkspaceApp(ctx context.Context, arg database.UpsertTaskWorkspaceAppParams) (database.TaskWorkspaceApp, error) {
|
||||
// Fetch the task to derive the RBAC object and authorize update on it.
|
||||
task, err := q.db.GetTaskByID(ctx, arg.TaskID)
|
||||
|
||||
@@ -278,6 +278,11 @@ func (s *MethodTestSuite) TestAPIKey() {
|
||||
dbm.EXPECT().DeleteApplicationConnectAPIKeysByUserID(gomock.Any(), a.UserID).Return(nil).AnyTimes()
|
||||
check.Args(a.UserID).Asserts(rbac.ResourceApiKey.WithOwner(a.UserID.String()), policy.ActionDelete).Returns()
|
||||
}))
|
||||
s.Run("DeleteBoundaryUsageStatsByReplicaID", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
replicaID := uuid.New()
|
||||
dbm.EXPECT().DeleteBoundaryUsageStatsByReplicaID(gomock.Any(), replicaID).Return(nil).AnyTimes()
|
||||
check.Args(replicaID).Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteExternalAuthLink", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
a := testutil.Fake(s.T(), faker, database.ExternalAuthLink{})
|
||||
dbm.EXPECT().GetExternalAuthLink(gomock.Any(), database.GetExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Return(a, nil).AnyTimes()
|
||||
@@ -528,6 +533,10 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
dbm.EXPECT().RemoveUserFromGroups(gomock.Any(), arg).Return(slice.New(g1.ID, g2.ID), nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(slice.New(g1.ID, g2.ID))
|
||||
}))
|
||||
s.Run("ResetBoundaryUsageStats", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().ResetBoundaryUsageStats(gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("UpdateGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
g := testutil.Fake(s.T(), faker, database.Group{})
|
||||
@@ -1862,6 +1871,18 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().GetWorkspaceAgentMetadata(gomock.Any(), arg).Return([]database.WorkspaceAgentMetadatum{dt}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionRead).Returns([]database.WorkspaceAgentMetadatum{dt})
|
||||
}))
|
||||
s.Run("BatchUpdateWorkspaceAgentMetadata", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
agt := testutil.Fake(s.T(), faker, database.WorkspaceAgent{})
|
||||
arg := database.BatchUpdateWorkspaceAgentMetadataParams{
|
||||
WorkspaceAgentID: []uuid.UUID{agt.ID},
|
||||
Key: []string{"key1"},
|
||||
Value: []string{"value1"},
|
||||
Error: []string{""},
|
||||
CollectedAt: []time.Time{dbtime.Now()},
|
||||
}
|
||||
dbm.EXPECT().BatchUpdateWorkspaceAgentMetadata(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceWorkspace.All(), policy.ActionUpdate).Returns()
|
||||
}))
|
||||
s.Run("GetWorkspaceAgentByInstanceID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
agt := testutil.Fake(s.T(), faker, database.WorkspaceAgent{})
|
||||
@@ -2133,9 +2154,12 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().InsertWorkspaceBuild(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("InsertWorkspaceBuildParameters", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
s.Run("Start/InsertWorkspaceBuildParameters", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
b := testutil.Fake(s.T(), faker, database.WorkspaceBuild{WorkspaceID: w.ID})
|
||||
b := testutil.Fake(s.T(), faker, database.WorkspaceBuild{
|
||||
WorkspaceID: w.ID,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
})
|
||||
arg := database.InsertWorkspaceBuildParametersParams{
|
||||
WorkspaceBuildID: b.ID,
|
||||
Name: []string{"foo", "bar"},
|
||||
@@ -2144,7 +2168,39 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().GetWorkspaceBuildByID(gomock.Any(), b.ID).Return(b, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), w.ID).Return(w, nil).AnyTimes()
|
||||
dbm.EXPECT().InsertWorkspaceBuildParameters(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionUpdate)
|
||||
check.Args(arg).Asserts(w, policy.ActionWorkspaceStart)
|
||||
}))
|
||||
s.Run("Stop/InsertWorkspaceBuildParameters", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
b := testutil.Fake(s.T(), faker, database.WorkspaceBuild{
|
||||
WorkspaceID: w.ID,
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
arg := database.InsertWorkspaceBuildParametersParams{
|
||||
WorkspaceBuildID: b.ID,
|
||||
Name: []string{"foo", "bar"},
|
||||
Value: []string{"baz", "qux"},
|
||||
}
|
||||
dbm.EXPECT().GetWorkspaceBuildByID(gomock.Any(), b.ID).Return(b, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), w.ID).Return(w, nil).AnyTimes()
|
||||
dbm.EXPECT().InsertWorkspaceBuildParameters(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionWorkspaceStop)
|
||||
}))
|
||||
s.Run("Delete/InsertWorkspaceBuildParameters", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
b := testutil.Fake(s.T(), faker, database.WorkspaceBuild{
|
||||
WorkspaceID: w.ID,
|
||||
Transition: database.WorkspaceTransitionDelete,
|
||||
})
|
||||
arg := database.InsertWorkspaceBuildParametersParams{
|
||||
WorkspaceBuildID: b.ID,
|
||||
Name: []string{"foo", "bar"},
|
||||
Value: []string{"baz", "qux"},
|
||||
}
|
||||
dbm.EXPECT().GetWorkspaceBuildByID(gomock.Any(), b.ID).Return(b, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), w.ID).Return(w, nil).AnyTimes()
|
||||
dbm.EXPECT().InsertWorkspaceBuildParameters(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("UpdateWorkspace", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
@@ -2535,6 +2591,24 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
dbm.EXPECT().ListTasks(gomock.Any(), gomock.Any()).Return([]database.Task{t1, t2}, nil).AnyTimes()
|
||||
check.Args(database.ListTasksParams{}).Asserts(t1, policy.ActionRead, t2, policy.ActionRead).Returns([]database.Task{t1, t2})
|
||||
}))
|
||||
s.Run("GetTaskSnapshot", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
snapshot := testutil.Fake(s.T(), faker, database.TaskSnapshot{TaskID: task.ID})
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().GetTaskSnapshot(gomock.Any(), task.ID).Return(snapshot, nil).AnyTimes()
|
||||
check.Args(task.ID).Asserts(task, policy.ActionRead, task, policy.ActionRead).Returns(snapshot)
|
||||
}))
|
||||
s.Run("UpsertTaskSnapshot", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
task := testutil.Fake(s.T(), faker, database.Task{})
|
||||
arg := database.UpsertTaskSnapshotParams{
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: []byte(`{"format":"agentapi","data":[]}`),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
}
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().UpsertTaskSnapshot(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionRead, task, policy.ActionUpdate).Returns()
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestProvisionerKeys() {
|
||||
@@ -2730,30 +2804,10 @@ func (s *MethodTestSuite) TestTailnetFunctions() {
|
||||
check.Args().
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteAllTailnetClientSubscriptions", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteAllTailnetClientSubscriptionsParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteAllTailnetTunnels", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteAllTailnetTunnelsParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteCoordinator", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteTailnetAgent", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteTailnetAgentParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate).Errors(sql.ErrNoRows)
|
||||
}))
|
||||
s.Run("DeleteTailnetClient", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteTailnetClientParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete).Errors(sql.ErrNoRows)
|
||||
}))
|
||||
s.Run("DeleteTailnetClientSubscription", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteTailnetClientSubscriptionParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteTailnetPeer", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.DeleteTailnetPeerParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete).Errors(sql.ErrNoRows)
|
||||
@@ -2762,18 +2816,6 @@ func (s *MethodTestSuite) TestTailnetFunctions() {
|
||||
check.Args(database.DeleteTailnetTunnelParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionDelete).Errors(sql.ErrNoRows)
|
||||
}))
|
||||
s.Run("GetAllTailnetAgents", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args().
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetTailnetAgents", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetTailnetClientsForAgent", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetTailnetPeers", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
@@ -2798,21 +2840,6 @@ func (s *MethodTestSuite) TestTailnetFunctions() {
|
||||
check.Args().
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("UpsertTailnetAgent", s.Subtest(func(db database.Store, check *expects) {
|
||||
dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
|
||||
check.Args(database.UpsertTailnetAgentParams{Node: json.RawMessage("{}")}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("UpsertTailnetClient", s.Subtest(func(db database.Store, check *expects) {
|
||||
dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
|
||||
check.Args(database.UpsertTailnetClientParams{Node: json.RawMessage("{}")}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("UpsertTailnetClientSubscription", s.Subtest(func(db database.Store, check *expects) {
|
||||
dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
|
||||
check.Args(database.UpsertTailnetClientSubscriptionParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("UpsertTailnetCoordinator", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate)
|
||||
@@ -2954,6 +2981,10 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
dbm.EXPECT().GetAuthorizationUserRoles(gomock.Any(), u.ID).Return(database.GetAuthorizationUserRolesRow{}, nil).AnyTimes()
|
||||
check.Args(u.ID).Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetBoundaryUsageSummary", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetBoundaryUsageSummary(gomock.Any(), int64(1000)).Return(database.GetBoundaryUsageSummaryRow{}, nil).AnyTimes()
|
||||
check.Args(int64(1000)).Asserts(rbac.ResourceBoundaryUsage, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetDERPMeshKey", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetDERPMeshKey(gomock.Any()).Return("testing", nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
@@ -3324,6 +3355,11 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
dbm.EXPECT().UpsertApplicationName(gomock.Any(), "").Return(nil).AnyTimes()
|
||||
check.Args("").Asserts(rbac.ResourceDeploymentConfig, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("UpsertBoundaryUsageStats", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
arg := database.UpsertBoundaryUsageStatsParams{ReplicaID: uuid.New()}
|
||||
dbm.EXPECT().UpsertBoundaryUsageStats(gomock.Any(), arg).Return(false, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceBoundaryUsage, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("GetHealthSettings", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetHealthSettings(gomock.Any()).Return("{}", nil).AnyTimes()
|
||||
check.Args().Asserts()
|
||||
@@ -4424,7 +4460,7 @@ func (s *MethodTestSuite) TestAuthorizePrebuiltWorkspace() {
|
||||
return nil
|
||||
}).Asserts(w, policy.ActionDelete, w.AsPrebuild(), policy.ActionDelete)
|
||||
}))
|
||||
s.Run("PrebuildUpdate/InsertWorkspaceBuildParameters", s.Subtest(func(db database.Store, check *expects) {
|
||||
s.Run("PrebuildDelete/InsertWorkspaceBuildParameters", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
o := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
tpl := dbgen.Template(s.T(), db, database.Template{
|
||||
@@ -4446,6 +4482,7 @@ func (s *MethodTestSuite) TestAuthorizePrebuiltWorkspace() {
|
||||
})
|
||||
wb := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{
|
||||
JobID: pj.ID,
|
||||
Transition: database.WorkspaceTransitionDelete,
|
||||
WorkspaceID: w.ID,
|
||||
TemplateVersionID: tv.ID,
|
||||
})
|
||||
@@ -4461,7 +4498,7 @@ func (s *MethodTestSuite) TestAuthorizePrebuiltWorkspace() {
|
||||
return xerrors.Errorf("not authorized for workspace type")
|
||||
}
|
||||
return nil
|
||||
}).Asserts(w, policy.ActionUpdate, w.AsPrebuild(), policy.ActionUpdate)
|
||||
}).Asserts(w, policy.ActionDelete, w.AsPrebuild(), policy.ActionDelete)
|
||||
}))
|
||||
}
|
||||
|
||||
|
||||
@@ -394,6 +394,7 @@ func WorkspaceAgentDevcontainer(t testing.TB, db database.Store, orig database.W
|
||||
Name: []string{takeFirst(orig.Name, testutil.GetRandomName(t))},
|
||||
WorkspaceFolder: []string{takeFirst(orig.WorkspaceFolder, "/workspace")},
|
||||
ConfigPath: []string{takeFirst(orig.ConfigPath, "")},
|
||||
SubagentID: []uuid.UUID{orig.SubagentID.UUID},
|
||||
})
|
||||
require.NoError(t, err, "insert workspace agent devcontainer")
|
||||
return devcontainers[0]
|
||||
|
||||
@@ -152,6 +152,13 @@ func (m queryMetricsStore) ArchiveUnusedTemplateVersions(ctx context.Context, ar
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) BatchUpdateWorkspaceAgentMetadata(ctx context.Context, arg database.BatchUpdateWorkspaceAgentMetadataParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.BatchUpdateWorkspaceAgentMetadata(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("BatchUpdateWorkspaceAgentMetadata").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg database.BatchUpdateWorkspaceLastUsedAtParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.BatchUpdateWorkspaceLastUsedAt(ctx, arg)
|
||||
@@ -304,14 +311,6 @@ func (m queryMetricsStore) DeleteAPIKeysByUserID(ctx context.Context, userID uui
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg database.DeleteAllTailnetClientSubscriptionsParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteAllTailnetClientSubscriptions(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteAllTailnetClientSubscriptions").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteAllTailnetClientSubscriptions").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteAllTailnetTunnels(ctx context.Context, arg database.DeleteAllTailnetTunnelsParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteAllTailnetTunnels(ctx, arg)
|
||||
@@ -336,11 +335,11 @@ func (m queryMetricsStore) DeleteApplicationConnectAPIKeysByUserID(ctx context.C
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
func (m queryMetricsStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteCoordinator(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("DeleteCoordinator").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteCoordinator").Inc()
|
||||
r0 := m.s.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
m.queryLatencies.WithLabelValues("DeleteBoundaryUsageStatsByReplicaID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteBoundaryUsageStatsByReplicaID").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
@@ -560,30 +559,6 @@ func (m queryMetricsStore) DeleteRuntimeConfig(ctx context.Context, key string)
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTailnetAgent(ctx context.Context, arg database.DeleteTailnetAgentParams) (database.DeleteTailnetAgentRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteTailnetAgent(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteTailnetAgent").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteTailnetAgent").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTailnetClient(ctx context.Context, arg database.DeleteTailnetClientParams) (database.DeleteTailnetClientRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteTailnetClient(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteTailnetClient").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteTailnetClient").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTailnetClientSubscription(ctx context.Context, arg database.DeleteTailnetClientSubscriptionParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteTailnetClientSubscription(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteTailnetClientSubscription").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteTailnetClientSubscription").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTailnetPeer(ctx context.Context, arg database.DeleteTailnetPeerParams) (database.DeleteTailnetPeerRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteTailnetPeer(ctx, arg)
|
||||
@@ -855,14 +830,6 @@ func (m queryMetricsStore) GetActiveWorkspaceBuildsByTemplateID(ctx context.Cont
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAllTailnetAgents(ctx context.Context) ([]database.TailnetAgent, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAllTailnetAgents(ctx)
|
||||
m.queryLatencies.WithLabelValues("GetAllTailnetAgents").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetAllTailnetAgents").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAllTailnetCoordinators(ctx context.Context) ([]database.TailnetCoordinator, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAllTailnetCoordinators(ctx)
|
||||
@@ -935,6 +902,14 @@ func (m queryMetricsStore) GetAuthorizationUserRoles(ctx context.Context, userID
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
m.queryLatencies.WithLabelValues("GetBoundaryUsageSummary").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetBoundaryUsageSummary").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetConnectionLogsOffset(ctx, arg)
|
||||
@@ -1751,22 +1726,6 @@ func (m queryMetricsStore) GetRuntimeConfig(ctx context.Context, key string) (st
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]database.TailnetAgent, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTailnetAgents(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("GetTailnetAgents").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetTailnetAgents").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]database.TailnetClient, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTailnetClientsForAgent(ctx, agentID)
|
||||
m.queryLatencies.WithLabelValues("GetTailnetClientsForAgent").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetTailnetClientsForAgent").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]database.TailnetPeer, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTailnetPeers(ctx, id)
|
||||
@@ -1815,6 +1774,14 @@ func (m queryMetricsStore) GetTaskByWorkspaceID(ctx context.Context, workspaceID
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTaskSnapshot(ctx context.Context, taskID uuid.UUID) (database.TaskSnapshot, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTaskSnapshot(ctx, taskID)
|
||||
m.queryLatencies.WithLabelValues("GetTaskSnapshot").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetTaskSnapshot").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTelemetryItem(ctx, key)
|
||||
@@ -3367,6 +3334,14 @@ func (m queryMetricsStore) RemoveUserFromGroups(ctx context.Context, arg databas
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.ResetBoundaryUsageStats(ctx)
|
||||
m.queryLatencies.WithLabelValues("ResetBoundaryUsageStats").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "ResetBoundaryUsageStats").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.RevokeDBCryptKey(ctx, activeKeyDigest)
|
||||
@@ -4118,6 +4093,14 @@ func (m queryMetricsStore) UpsertApplicationName(ctx context.Context, value stri
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertBoundaryUsageStats(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertBoundaryUsageStats").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpsertBoundaryUsageStats").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertConnectionLog(ctx, arg)
|
||||
@@ -4222,30 +4205,6 @@ func (m queryMetricsStore) UpsertRuntimeConfig(ctx context.Context, arg database
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTailnetAgent(ctx context.Context, arg database.UpsertTailnetAgentParams) (database.TailnetAgent, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertTailnetAgent(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertTailnetAgent").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpsertTailnetAgent").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTailnetClient(ctx context.Context, arg database.UpsertTailnetClientParams) (database.TailnetClient, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertTailnetClient(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertTailnetClient").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpsertTailnetClient").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTailnetClientSubscription(ctx context.Context, arg database.UpsertTailnetClientSubscriptionParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpsertTailnetClientSubscription(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertTailnetClientSubscription").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpsertTailnetClientSubscription").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (database.TailnetCoordinator, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertTailnetCoordinator(ctx, id)
|
||||
@@ -4270,6 +4229,14 @@ func (m queryMetricsStore) UpsertTailnetTunnel(ctx context.Context, arg database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTaskSnapshot(ctx context.Context, arg database.UpsertTaskSnapshotParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpsertTaskSnapshot(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertTaskSnapshot").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpsertTaskSnapshot").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertTaskWorkspaceApp(ctx context.Context, arg database.UpsertTaskWorkspaceAppParams) (database.TaskWorkspaceApp, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertTaskWorkspaceApp(ctx, arg)
|
||||
|
||||
@@ -132,6 +132,20 @@ func (mr *MockStoreMockRecorder) ArchiveUnusedTemplateVersions(ctx, arg any) *go
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArchiveUnusedTemplateVersions", reflect.TypeOf((*MockStore)(nil).ArchiveUnusedTemplateVersions), ctx, arg)
|
||||
}
|
||||
|
||||
// BatchUpdateWorkspaceAgentMetadata mocks base method.
|
||||
func (m *MockStore) BatchUpdateWorkspaceAgentMetadata(ctx context.Context, arg database.BatchUpdateWorkspaceAgentMetadataParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "BatchUpdateWorkspaceAgentMetadata", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// BatchUpdateWorkspaceAgentMetadata indicates an expected call of BatchUpdateWorkspaceAgentMetadata.
|
||||
func (mr *MockStoreMockRecorder) BatchUpdateWorkspaceAgentMetadata(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchUpdateWorkspaceAgentMetadata", reflect.TypeOf((*MockStore)(nil).BatchUpdateWorkspaceAgentMetadata), ctx, arg)
|
||||
}
|
||||
|
||||
// BatchUpdateWorkspaceLastUsedAt mocks base method.
|
||||
func (m *MockStore) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg database.BatchUpdateWorkspaceLastUsedAtParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -455,20 +469,6 @@ func (mr *MockStoreMockRecorder) DeleteAPIKeysByUserID(ctx, userID any) *gomock.
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAPIKeysByUserID", reflect.TypeOf((*MockStore)(nil).DeleteAPIKeysByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteAllTailnetClientSubscriptions mocks base method.
|
||||
func (m *MockStore) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg database.DeleteAllTailnetClientSubscriptionsParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteAllTailnetClientSubscriptions", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteAllTailnetClientSubscriptions indicates an expected call of DeleteAllTailnetClientSubscriptions.
|
||||
func (mr *MockStoreMockRecorder) DeleteAllTailnetClientSubscriptions(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllTailnetClientSubscriptions", reflect.TypeOf((*MockStore)(nil).DeleteAllTailnetClientSubscriptions), ctx, arg)
|
||||
}
|
||||
|
||||
// DeleteAllTailnetTunnels mocks base method.
|
||||
func (m *MockStore) DeleteAllTailnetTunnels(ctx context.Context, arg database.DeleteAllTailnetTunnelsParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -511,18 +511,18 @@ func (mr *MockStoreMockRecorder) DeleteApplicationConnectAPIKeysByUserID(ctx, us
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteApplicationConnectAPIKeysByUserID", reflect.TypeOf((*MockStore)(nil).DeleteApplicationConnectAPIKeysByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteCoordinator mocks base method.
|
||||
func (m *MockStore) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
// DeleteBoundaryUsageStatsByReplicaID mocks base method.
|
||||
func (m *MockStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteCoordinator", ctx, id)
|
||||
ret := m.ctrl.Call(m, "DeleteBoundaryUsageStatsByReplicaID", ctx, replicaID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteCoordinator indicates an expected call of DeleteCoordinator.
|
||||
func (mr *MockStoreMockRecorder) DeleteCoordinator(ctx, id any) *gomock.Call {
|
||||
// DeleteBoundaryUsageStatsByReplicaID indicates an expected call of DeleteBoundaryUsageStatsByReplicaID.
|
||||
func (mr *MockStoreMockRecorder) DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCoordinator", reflect.TypeOf((*MockStore)(nil).DeleteCoordinator), ctx, id)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBoundaryUsageStatsByReplicaID", reflect.TypeOf((*MockStore)(nil).DeleteBoundaryUsageStatsByReplicaID), ctx, replicaID)
|
||||
}
|
||||
|
||||
// DeleteCryptoKey mocks base method.
|
||||
@@ -910,50 +910,6 @@ func (mr *MockStoreMockRecorder) DeleteRuntimeConfig(ctx, key any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRuntimeConfig", reflect.TypeOf((*MockStore)(nil).DeleteRuntimeConfig), ctx, key)
|
||||
}
|
||||
|
||||
// DeleteTailnetAgent mocks base method.
|
||||
func (m *MockStore) DeleteTailnetAgent(ctx context.Context, arg database.DeleteTailnetAgentParams) (database.DeleteTailnetAgentRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTailnetAgent", ctx, arg)
|
||||
ret0, _ := ret[0].(database.DeleteTailnetAgentRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// DeleteTailnetAgent indicates an expected call of DeleteTailnetAgent.
|
||||
func (mr *MockStoreMockRecorder) DeleteTailnetAgent(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTailnetAgent", reflect.TypeOf((*MockStore)(nil).DeleteTailnetAgent), ctx, arg)
|
||||
}
|
||||
|
||||
// DeleteTailnetClient mocks base method.
|
||||
func (m *MockStore) DeleteTailnetClient(ctx context.Context, arg database.DeleteTailnetClientParams) (database.DeleteTailnetClientRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTailnetClient", ctx, arg)
|
||||
ret0, _ := ret[0].(database.DeleteTailnetClientRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// DeleteTailnetClient indicates an expected call of DeleteTailnetClient.
|
||||
func (mr *MockStoreMockRecorder) DeleteTailnetClient(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTailnetClient", reflect.TypeOf((*MockStore)(nil).DeleteTailnetClient), ctx, arg)
|
||||
}
|
||||
|
||||
// DeleteTailnetClientSubscription mocks base method.
|
||||
func (m *MockStore) DeleteTailnetClientSubscription(ctx context.Context, arg database.DeleteTailnetClientSubscriptionParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTailnetClientSubscription", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteTailnetClientSubscription indicates an expected call of DeleteTailnetClientSubscription.
|
||||
func (mr *MockStoreMockRecorder) DeleteTailnetClientSubscription(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTailnetClientSubscription", reflect.TypeOf((*MockStore)(nil).DeleteTailnetClientSubscription), ctx, arg)
|
||||
}
|
||||
|
||||
// DeleteTailnetPeer mocks base method.
|
||||
func (m *MockStore) DeleteTailnetPeer(ctx context.Context, arg database.DeleteTailnetPeerParams) (database.DeleteTailnetPeerRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1452,21 +1408,6 @@ func (mr *MockStoreMockRecorder) GetActiveWorkspaceBuildsByTemplateID(ctx, templ
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveWorkspaceBuildsByTemplateID", reflect.TypeOf((*MockStore)(nil).GetActiveWorkspaceBuildsByTemplateID), ctx, templateID)
|
||||
}
|
||||
|
||||
// GetAllTailnetAgents mocks base method.
|
||||
func (m *MockStore) GetAllTailnetAgents(ctx context.Context) ([]database.TailnetAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAllTailnetAgents", ctx)
|
||||
ret0, _ := ret[0].([]database.TailnetAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetAllTailnetAgents indicates an expected call of GetAllTailnetAgents.
|
||||
func (mr *MockStoreMockRecorder) GetAllTailnetAgents(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllTailnetAgents", reflect.TypeOf((*MockStore)(nil).GetAllTailnetAgents), ctx)
|
||||
}
|
||||
|
||||
// GetAllTailnetCoordinators mocks base method.
|
||||
func (m *MockStore) GetAllTailnetCoordinators(ctx context.Context) ([]database.TailnetCoordinator, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1707,6 +1648,21 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspacesAndAgentsByOwnerID(ctx,
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspacesAndAgentsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspacesAndAgentsByOwnerID), ctx, ownerID, prepared)
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary mocks base method.
|
||||
func (m *MockStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetBoundaryUsageSummary", ctx, maxStalenessMs)
|
||||
ret0, _ := ret[0].(database.GetBoundaryUsageSummaryRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary indicates an expected call of GetBoundaryUsageSummary.
|
||||
func (mr *MockStoreMockRecorder) GetBoundaryUsageSummary(ctx, maxStalenessMs any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBoundaryUsageSummary", reflect.TypeOf((*MockStore)(nil).GetBoundaryUsageSummary), ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
// GetConnectionLogsOffset mocks base method.
|
||||
func (m *MockStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -3237,36 +3193,6 @@ func (mr *MockStoreMockRecorder) GetRuntimeConfig(ctx, key any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntimeConfig", reflect.TypeOf((*MockStore)(nil).GetRuntimeConfig), ctx, key)
|
||||
}
|
||||
|
||||
// GetTailnetAgents mocks base method.
|
||||
func (m *MockStore) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]database.TailnetAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTailnetAgents", ctx, id)
|
||||
ret0, _ := ret[0].([]database.TailnetAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTailnetAgents indicates an expected call of GetTailnetAgents.
|
||||
func (mr *MockStoreMockRecorder) GetTailnetAgents(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTailnetAgents", reflect.TypeOf((*MockStore)(nil).GetTailnetAgents), ctx, id)
|
||||
}
|
||||
|
||||
// GetTailnetClientsForAgent mocks base method.
|
||||
func (m *MockStore) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]database.TailnetClient, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTailnetClientsForAgent", ctx, agentID)
|
||||
ret0, _ := ret[0].([]database.TailnetClient)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTailnetClientsForAgent indicates an expected call of GetTailnetClientsForAgent.
|
||||
func (mr *MockStoreMockRecorder) GetTailnetClientsForAgent(ctx, agentID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTailnetClientsForAgent", reflect.TypeOf((*MockStore)(nil).GetTailnetClientsForAgent), ctx, agentID)
|
||||
}
|
||||
|
||||
// GetTailnetPeers mocks base method.
|
||||
func (m *MockStore) GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]database.TailnetPeer, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -3357,6 +3283,21 @@ func (mr *MockStoreMockRecorder) GetTaskByWorkspaceID(ctx, workspaceID any) *gom
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskByWorkspaceID", reflect.TypeOf((*MockStore)(nil).GetTaskByWorkspaceID), ctx, workspaceID)
|
||||
}
|
||||
|
||||
// GetTaskSnapshot mocks base method.
|
||||
func (m *MockStore) GetTaskSnapshot(ctx context.Context, taskID uuid.UUID) (database.TaskSnapshot, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTaskSnapshot", ctx, taskID)
|
||||
ret0, _ := ret[0].(database.TaskSnapshot)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTaskSnapshot indicates an expected call of GetTaskSnapshot.
|
||||
func (mr *MockStoreMockRecorder) GetTaskSnapshot(ctx, taskID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskSnapshot", reflect.TypeOf((*MockStore)(nil).GetTaskSnapshot), ctx, taskID)
|
||||
}
|
||||
|
||||
// GetTelemetryItem mocks base method.
|
||||
func (m *MockStore) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6337,6 +6278,20 @@ func (mr *MockStoreMockRecorder) RemoveUserFromGroups(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroups", reflect.TypeOf((*MockStore)(nil).RemoveUserFromGroups), ctx, arg)
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats mocks base method.
|
||||
func (m *MockStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResetBoundaryUsageStats", ctx)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats indicates an expected call of ResetBoundaryUsageStats.
|
||||
func (mr *MockStoreMockRecorder) ResetBoundaryUsageStats(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBoundaryUsageStats", reflect.TypeOf((*MockStore)(nil).ResetBoundaryUsageStats), ctx)
|
||||
}
|
||||
|
||||
// RevokeDBCryptKey mocks base method.
|
||||
func (m *MockStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -7691,6 +7646,21 @@ func (mr *MockStoreMockRecorder) UpsertApplicationName(ctx, value any) *gomock.C
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertApplicationName", reflect.TypeOf((*MockStore)(nil).UpsertApplicationName), ctx, value)
|
||||
}
|
||||
|
||||
// UpsertBoundaryUsageStats mocks base method.
|
||||
func (m *MockStore) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertBoundaryUsageStats", ctx, arg)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpsertBoundaryUsageStats indicates an expected call of UpsertBoundaryUsageStats.
|
||||
func (mr *MockStoreMockRecorder) UpsertBoundaryUsageStats(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertBoundaryUsageStats", reflect.TypeOf((*MockStore)(nil).UpsertBoundaryUsageStats), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertConnectionLog mocks base method.
|
||||
func (m *MockStore) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -7875,50 +7845,6 @@ func (mr *MockStoreMockRecorder) UpsertRuntimeConfig(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertRuntimeConfig", reflect.TypeOf((*MockStore)(nil).UpsertRuntimeConfig), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTailnetAgent mocks base method.
|
||||
func (m *MockStore) UpsertTailnetAgent(ctx context.Context, arg database.UpsertTailnetAgentParams) (database.TailnetAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertTailnetAgent", ctx, arg)
|
||||
ret0, _ := ret[0].(database.TailnetAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpsertTailnetAgent indicates an expected call of UpsertTailnetAgent.
|
||||
func (mr *MockStoreMockRecorder) UpsertTailnetAgent(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTailnetAgent", reflect.TypeOf((*MockStore)(nil).UpsertTailnetAgent), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTailnetClient mocks base method.
|
||||
func (m *MockStore) UpsertTailnetClient(ctx context.Context, arg database.UpsertTailnetClientParams) (database.TailnetClient, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertTailnetClient", ctx, arg)
|
||||
ret0, _ := ret[0].(database.TailnetClient)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpsertTailnetClient indicates an expected call of UpsertTailnetClient.
|
||||
func (mr *MockStoreMockRecorder) UpsertTailnetClient(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTailnetClient", reflect.TypeOf((*MockStore)(nil).UpsertTailnetClient), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTailnetClientSubscription mocks base method.
|
||||
func (m *MockStore) UpsertTailnetClientSubscription(ctx context.Context, arg database.UpsertTailnetClientSubscriptionParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertTailnetClientSubscription", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpsertTailnetClientSubscription indicates an expected call of UpsertTailnetClientSubscription.
|
||||
func (mr *MockStoreMockRecorder) UpsertTailnetClientSubscription(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTailnetClientSubscription", reflect.TypeOf((*MockStore)(nil).UpsertTailnetClientSubscription), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTailnetCoordinator mocks base method.
|
||||
func (m *MockStore) UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (database.TailnetCoordinator, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -7964,6 +7890,20 @@ func (mr *MockStoreMockRecorder) UpsertTailnetTunnel(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTailnetTunnel", reflect.TypeOf((*MockStore)(nil).UpsertTailnetTunnel), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTaskSnapshot mocks base method.
|
||||
func (m *MockStore) UpsertTaskSnapshot(ctx context.Context, arg database.UpsertTaskSnapshotParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertTaskSnapshot", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpsertTaskSnapshot indicates an expected call of UpsertTaskSnapshot.
|
||||
func (mr *MockStoreMockRecorder) UpsertTaskSnapshot(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTaskSnapshot", reflect.TypeOf((*MockStore)(nil).UpsertTaskSnapshot), ctx, arg)
|
||||
}
|
||||
|
||||
// UpsertTaskWorkspaceApp mocks base method.
|
||||
func (m *MockStore) UpsertTaskWorkspaceApp(ctx context.Context, arg database.UpsertTaskWorkspaceAppParams) (database.TaskWorkspaceApp, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
Generated
+43
-129
@@ -204,7 +204,11 @@ CREATE TYPE api_key_scope AS ENUM (
|
||||
'task:delete',
|
||||
'task:*',
|
||||
'workspace:share',
|
||||
'workspace_dormant:share'
|
||||
'workspace_dormant:share',
|
||||
'boundary_usage:*',
|
||||
'boundary_usage:delete',
|
||||
'boundary_usage:read',
|
||||
'boundary_usage:update'
|
||||
);
|
||||
|
||||
CREATE TYPE app_sharing_level AS ENUM (
|
||||
@@ -971,80 +975,6 @@ BEGIN
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_agent_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF (OLD IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_agent_update', OLD.id::text);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
IF (NEW IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_agent_update', NEW.id::text);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_client_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
var_client_id uuid;
|
||||
var_coordinator_id uuid;
|
||||
var_agent_ids uuid[];
|
||||
var_agent_id uuid;
|
||||
BEGIN
|
||||
IF (NEW.id IS NOT NULL) THEN
|
||||
var_client_id = NEW.id;
|
||||
var_coordinator_id = NEW.coordinator_id;
|
||||
ELSIF (OLD.id IS NOT NULL) THEN
|
||||
var_client_id = OLD.id;
|
||||
var_coordinator_id = OLD.coordinator_id;
|
||||
END IF;
|
||||
|
||||
-- Read all agents the client is subscribed to, so we can notify them.
|
||||
SELECT
|
||||
array_agg(agent_id)
|
||||
INTO
|
||||
var_agent_ids
|
||||
FROM
|
||||
tailnet_client_subscriptions subs
|
||||
WHERE
|
||||
subs.client_id = NEW.id AND
|
||||
subs.coordinator_id = NEW.coordinator_id;
|
||||
|
||||
-- No agents to notify
|
||||
if (var_agent_ids IS NULL) THEN
|
||||
return NULL;
|
||||
END IF;
|
||||
|
||||
-- pg_notify is limited to 8k bytes, which is approximately 221 UUIDs.
|
||||
-- Instead of sending all agent ids in a single update, send one for each
|
||||
-- agent id to prevent overflow.
|
||||
FOREACH var_agent_id IN ARRAY var_agent_ids
|
||||
LOOP
|
||||
PERFORM pg_notify('tailnet_client_update', var_client_id || ',' || var_agent_id);
|
||||
END LOOP;
|
||||
|
||||
return NULL;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_client_subscription_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF (NEW IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_client_update', NEW.client_id || ',' || NEW.agent_id);
|
||||
RETURN NULL;
|
||||
ELSIF (OLD IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_client_update', OLD.client_id || ',' || OLD.agent_id);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_coordinator_heartbeat() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
@@ -1185,6 +1115,32 @@ CREATE TABLE audit_logs (
|
||||
resource_icon text NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE boundary_usage_stats (
|
||||
replica_id uuid NOT NULL,
|
||||
unique_workspaces_count bigint DEFAULT 0 NOT NULL,
|
||||
unique_users_count bigint DEFAULT 0 NOT NULL,
|
||||
allowed_requests bigint DEFAULT 0 NOT NULL,
|
||||
denied_requests bigint DEFAULT 0 NOT NULL,
|
||||
window_start timestamp with time zone DEFAULT now() NOT NULL,
|
||||
updated_at timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
|
||||
COMMENT ON TABLE boundary_usage_stats IS 'Per-replica boundary usage statistics for telemetry aggregation.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.replica_id IS 'The unique identifier of the replica reporting stats.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.unique_workspaces_count IS 'Count of unique workspaces that used boundary on this replica.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.unique_users_count IS 'Count of unique users that used boundary on this replica.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.allowed_requests IS 'Total allowed requests through boundary on this replica.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.denied_requests IS 'Total denied requests through boundary on this replica.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.window_start IS 'Start of the time window for these stats, set on first flush after reset.';
|
||||
|
||||
COMMENT ON COLUMN boundary_usage_stats.updated_at IS 'Timestamp of the last update to this row.';
|
||||
|
||||
CREATE TABLE connection_logs (
|
||||
id uuid NOT NULL,
|
||||
connect_time timestamp with time zone NOT NULL,
|
||||
@@ -1806,35 +1762,14 @@ CREATE TABLE site_configs (
|
||||
value text NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_agents (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
node jsonb NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_client_subscriptions (
|
||||
client_id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
agent_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_clients (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
node jsonb NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_coordinators (
|
||||
CREATE UNLOGGED TABLE tailnet_coordinators (
|
||||
id uuid NOT NULL,
|
||||
heartbeat_at timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
COMMENT ON TABLE tailnet_coordinators IS 'We keep this separate from replicas in case we need to break the coordinator out into its own service';
|
||||
|
||||
CREATE TABLE tailnet_peers (
|
||||
CREATE UNLOGGED TABLE tailnet_peers (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
@@ -1842,7 +1777,7 @@ CREATE TABLE tailnet_peers (
|
||||
status tailnet_status DEFAULT 'ok'::tailnet_status NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_tunnels (
|
||||
CREATE UNLOGGED TABLE tailnet_tunnels (
|
||||
coordinator_id uuid NOT NULL,
|
||||
src_id uuid NOT NULL,
|
||||
dst_id uuid NOT NULL,
|
||||
@@ -2097,7 +2032,7 @@ CREATE TABLE telemetry_items (
|
||||
CREATE TABLE telemetry_locks (
|
||||
event_type text NOT NULL,
|
||||
period_ending_at timestamp with time zone NOT NULL,
|
||||
CONSTRAINT telemetry_lock_event_type_constraint CHECK ((event_type = 'aibridge_interceptions_summary'::text))
|
||||
CONSTRAINT telemetry_lock_event_type_constraint CHECK ((event_type = ANY (ARRAY['aibridge_interceptions_summary'::text, 'boundary_usage_summary'::text])))
|
||||
);
|
||||
|
||||
COMMENT ON TABLE telemetry_locks IS 'Telemetry lock tracking table for deduplication of heartbeat events across replicas.';
|
||||
@@ -2522,7 +2457,8 @@ CREATE TABLE workspace_agent_devcontainers (
|
||||
created_at timestamp with time zone DEFAULT now() NOT NULL,
|
||||
workspace_folder text NOT NULL,
|
||||
config_path text NOT NULL,
|
||||
name text NOT NULL
|
||||
name text NOT NULL,
|
||||
subagent_id uuid
|
||||
);
|
||||
|
||||
COMMENT ON TABLE workspace_agent_devcontainers IS 'Workspace agent devcontainer configuration';
|
||||
@@ -3036,6 +2972,9 @@ ALTER TABLE ONLY api_keys
|
||||
ALTER TABLE ONLY audit_logs
|
||||
ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY boundary_usage_stats
|
||||
ADD CONSTRAINT boundary_usage_stats_pkey PRIMARY KEY (replica_id);
|
||||
|
||||
ALTER TABLE ONLY connection_logs
|
||||
ADD CONSTRAINT connection_logs_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -3156,15 +3095,6 @@ ALTER TABLE ONLY provisioner_keys
|
||||
ALTER TABLE ONLY site_configs
|
||||
ADD CONSTRAINT site_configs_key_key UNIQUE (key);
|
||||
|
||||
ALTER TABLE ONLY tailnet_agents
|
||||
ADD CONSTRAINT tailnet_agents_pkey PRIMARY KEY (id, coordinator_id);
|
||||
|
||||
ALTER TABLE ONLY tailnet_client_subscriptions
|
||||
ADD CONSTRAINT tailnet_client_subscriptions_pkey PRIMARY KEY (client_id, coordinator_id, agent_id);
|
||||
|
||||
ALTER TABLE ONLY tailnet_clients
|
||||
ADD CONSTRAINT tailnet_clients_pkey PRIMARY KEY (id, coordinator_id);
|
||||
|
||||
ALTER TABLE ONLY tailnet_coordinators
|
||||
ADD CONSTRAINT tailnet_coordinators_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -3404,10 +3334,6 @@ COMMENT ON INDEX idx_provisioner_daemons_org_name_owner_key IS 'Allow unique pro
|
||||
|
||||
CREATE INDEX idx_provisioner_jobs_status ON provisioner_jobs USING btree (job_status);
|
||||
|
||||
CREATE INDEX idx_tailnet_agents_coordinator ON tailnet_agents USING btree (coordinator_id);
|
||||
|
||||
CREATE INDEX idx_tailnet_clients_coordinator ON tailnet_clients USING btree (coordinator_id);
|
||||
|
||||
CREATE INDEX idx_tailnet_peers_coordinator ON tailnet_peers USING btree (coordinator_id);
|
||||
|
||||
CREATE INDEX idx_tailnet_tunnels_dst_id ON tailnet_tunnels USING hash (dst_id);
|
||||
@@ -3582,12 +3508,6 @@ CREATE TRIGGER remove_organization_member_custom_role BEFORE DELETE ON custom_ro
|
||||
|
||||
COMMENT ON TRIGGER remove_organization_member_custom_role ON custom_roles IS 'When a custom_role is deleted, this trigger removes the role from all organization members.';
|
||||
|
||||
CREATE TRIGGER tailnet_notify_agent_change AFTER INSERT OR DELETE OR UPDATE ON tailnet_agents FOR EACH ROW EXECUTE FUNCTION tailnet_notify_agent_change();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_client_change AFTER INSERT OR DELETE OR UPDATE ON tailnet_clients FOR EACH ROW EXECUTE FUNCTION tailnet_notify_client_change();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_client_subscription_change AFTER INSERT OR DELETE OR UPDATE ON tailnet_client_subscriptions FOR EACH ROW EXECUTE FUNCTION tailnet_notify_client_subscription_change();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_coordinator_heartbeat AFTER INSERT OR UPDATE ON tailnet_coordinators FOR EACH ROW EXECUTE FUNCTION tailnet_notify_coordinator_heartbeat();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_peer_change AFTER INSERT OR DELETE OR UPDATE ON tailnet_peers FOR EACH ROW EXECUTE FUNCTION tailnet_notify_peer_change();
|
||||
@@ -3725,15 +3645,6 @@ ALTER TABLE ONLY provisioner_jobs
|
||||
ALTER TABLE ONLY provisioner_keys
|
||||
ADD CONSTRAINT provisioner_keys_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY tailnet_agents
|
||||
ADD CONSTRAINT tailnet_agents_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY tailnet_client_subscriptions
|
||||
ADD CONSTRAINT tailnet_client_subscriptions_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY tailnet_clients
|
||||
ADD CONSTRAINT tailnet_clients_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY tailnet_peers
|
||||
ADD CONSTRAINT tailnet_peers_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
|
||||
@@ -3827,6 +3738,9 @@ ALTER TABLE ONLY user_status_changes
|
||||
ALTER TABLE ONLY webpush_subscriptions
|
||||
ADD CONSTRAINT webpush_subscriptions_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_agent_devcontainers
|
||||
ADD CONSTRAINT workspace_agent_devcontainers_subagent_id_fkey FOREIGN KEY (subagent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_agent_devcontainers
|
||||
ADD CONSTRAINT workspace_agent_devcontainers_workspace_agent_id_fkey FOREIGN KEY (workspace_agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
@@ -41,9 +41,6 @@ const (
|
||||
ForeignKeyProvisionerJobTimingsJobID ForeignKeyConstraint = "provisioner_job_timings_job_id_fkey" // ALTER TABLE ONLY provisioner_job_timings ADD CONSTRAINT provisioner_job_timings_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
ForeignKeyProvisionerJobsOrganizationID ForeignKeyConstraint = "provisioner_jobs_organization_id_fkey" // ALTER TABLE ONLY provisioner_jobs ADD CONSTRAINT provisioner_jobs_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
ForeignKeyProvisionerKeysOrganizationID ForeignKeyConstraint = "provisioner_keys_organization_id_fkey" // ALTER TABLE ONLY provisioner_keys ADD CONSTRAINT provisioner_keys_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
ForeignKeyTailnetAgentsCoordinatorID ForeignKeyConstraint = "tailnet_agents_coordinator_id_fkey" // ALTER TABLE ONLY tailnet_agents ADD CONSTRAINT tailnet_agents_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
ForeignKeyTailnetClientSubscriptionsCoordinatorID ForeignKeyConstraint = "tailnet_client_subscriptions_coordinator_id_fkey" // ALTER TABLE ONLY tailnet_client_subscriptions ADD CONSTRAINT tailnet_client_subscriptions_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
ForeignKeyTailnetClientsCoordinatorID ForeignKeyConstraint = "tailnet_clients_coordinator_id_fkey" // ALTER TABLE ONLY tailnet_clients ADD CONSTRAINT tailnet_clients_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
ForeignKeyTailnetPeersCoordinatorID ForeignKeyConstraint = "tailnet_peers_coordinator_id_fkey" // ALTER TABLE ONLY tailnet_peers ADD CONSTRAINT tailnet_peers_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
ForeignKeyTailnetTunnelsCoordinatorID ForeignKeyConstraint = "tailnet_tunnels_coordinator_id_fkey" // ALTER TABLE ONLY tailnet_tunnels ADD CONSTRAINT tailnet_tunnels_coordinator_id_fkey FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators(id) ON DELETE CASCADE;
|
||||
ForeignKeyTaskSnapshotsTaskID ForeignKeyConstraint = "task_snapshots_task_id_fkey" // ALTER TABLE ONLY task_snapshots ADD CONSTRAINT task_snapshots_task_id_fkey FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE;
|
||||
@@ -75,6 +72,7 @@ const (
|
||||
ForeignKeyUserSecretsUserID ForeignKeyConstraint = "user_secrets_user_id_fkey" // ALTER TABLE ONLY user_secrets ADD CONSTRAINT user_secrets_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyUserStatusChangesUserID ForeignKeyConstraint = "user_status_changes_user_id_fkey" // ALTER TABLE ONLY user_status_changes ADD CONSTRAINT user_status_changes_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id);
|
||||
ForeignKeyWebpushSubscriptionsUserID ForeignKeyConstraint = "webpush_subscriptions_user_id_fkey" // ALTER TABLE ONLY webpush_subscriptions ADD CONSTRAINT webpush_subscriptions_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceAgentDevcontainersSubagentID ForeignKeyConstraint = "workspace_agent_devcontainers_subagent_id_fkey" // ALTER TABLE ONLY workspace_agent_devcontainers ADD CONSTRAINT workspace_agent_devcontainers_subagent_id_fkey FOREIGN KEY (subagent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceAgentDevcontainersWorkspaceAgentID ForeignKeyConstraint = "workspace_agent_devcontainers_workspace_agent_id_fkey" // ALTER TABLE ONLY workspace_agent_devcontainers ADD CONSTRAINT workspace_agent_devcontainers_workspace_agent_id_fkey FOREIGN KEY (workspace_agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceAgentLogSourcesWorkspaceAgentID ForeignKeyConstraint = "workspace_agent_log_sources_workspace_agent_id_fkey" // ALTER TABLE ONLY workspace_agent_log_sources ADD CONSTRAINT workspace_agent_log_sources_workspace_agent_id_fkey FOREIGN KEY (workspace_agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceAgentMemoryResourceMonitorsAgentID ForeignKeyConstraint = "workspace_agent_memory_resource_monitors_agent_id_fkey" // ALTER TABLE ONLY workspace_agent_memory_resource_monitors ADD CONSTRAINT workspace_agent_memory_resource_monitors_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
-- Restore tailnet v1 API tables (unused, but required for rollback).
|
||||
|
||||
-- Create tables.
|
||||
CREATE TABLE tailnet_clients (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
node jsonb NOT NULL,
|
||||
PRIMARY KEY (id, coordinator_id),
|
||||
FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators (id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_agents (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
node jsonb NOT NULL,
|
||||
PRIMARY KEY (id, coordinator_id),
|
||||
FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators (id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE TABLE tailnet_client_subscriptions (
|
||||
client_id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
agent_id uuid NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY (client_id, coordinator_id, agent_id),
|
||||
FOREIGN KEY (coordinator_id) REFERENCES tailnet_coordinators (id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- Create indexes.
|
||||
CREATE INDEX idx_tailnet_agents_coordinator ON tailnet_agents USING btree (coordinator_id);
|
||||
CREATE INDEX idx_tailnet_clients_coordinator ON tailnet_clients USING btree (coordinator_id);
|
||||
|
||||
-- Create trigger functions.
|
||||
CREATE FUNCTION tailnet_notify_agent_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF (OLD IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_agent_update', OLD.id::text);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
IF (NEW IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_agent_update', NEW.id::text);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_client_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
var_client_id uuid;
|
||||
var_coordinator_id uuid;
|
||||
var_agent_ids uuid[];
|
||||
var_agent_id uuid;
|
||||
BEGIN
|
||||
IF (NEW.id IS NOT NULL) THEN
|
||||
var_client_id = NEW.id;
|
||||
var_coordinator_id = NEW.coordinator_id;
|
||||
ELSIF (OLD.id IS NOT NULL) THEN
|
||||
var_client_id = OLD.id;
|
||||
var_coordinator_id = OLD.coordinator_id;
|
||||
END IF;
|
||||
|
||||
-- Read all agents the client is subscribed to, so we can notify them.
|
||||
SELECT
|
||||
array_agg(agent_id)
|
||||
INTO
|
||||
var_agent_ids
|
||||
FROM
|
||||
tailnet_client_subscriptions subs
|
||||
WHERE
|
||||
subs.client_id = NEW.id AND
|
||||
subs.coordinator_id = NEW.coordinator_id;
|
||||
|
||||
-- No agents to notify
|
||||
if (var_agent_ids IS NULL) THEN
|
||||
return NULL;
|
||||
END IF;
|
||||
|
||||
-- pg_notify is limited to 8k bytes, which is approximately 221 UUIDs.
|
||||
-- Instead of sending all agent ids in a single update, send one for each
|
||||
-- agent id to prevent overflow.
|
||||
FOREACH var_agent_id IN ARRAY var_agent_ids
|
||||
LOOP
|
||||
PERFORM pg_notify('tailnet_client_update', var_client_id || ',' || var_agent_id);
|
||||
END LOOP;
|
||||
|
||||
return NULL;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION tailnet_notify_client_subscription_change() RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF (NEW IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_client_update', NEW.client_id || ',' || NEW.agent_id);
|
||||
RETURN NULL;
|
||||
ELSIF (OLD IS NOT NULL) THEN
|
||||
PERFORM pg_notify('tailnet_client_update', OLD.client_id || ',' || OLD.agent_id);
|
||||
RETURN NULL;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- Create triggers.
|
||||
CREATE TRIGGER tailnet_notify_agent_change
|
||||
AFTER INSERT OR DELETE OR UPDATE ON tailnet_agents
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION tailnet_notify_agent_change();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_client_change
|
||||
AFTER INSERT OR DELETE OR UPDATE ON tailnet_clients
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION tailnet_notify_client_change();
|
||||
|
||||
CREATE TRIGGER tailnet_notify_client_subscription_change
|
||||
AFTER INSERT OR DELETE OR UPDATE ON tailnet_client_subscriptions
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION tailnet_notify_client_subscription_change();
|
||||
@@ -0,0 +1,20 @@
|
||||
-- Remove unused tailnet v1 API tables.
|
||||
-- These tables were superseded by tailnet_peers and tailnet_tunnels in migration
|
||||
-- 000168. The v1 API code was removed in commit d6154c4310 ("remove tailnet v1
|
||||
-- API support"), but the tables and queries were never cleaned up.
|
||||
|
||||
-- Drop triggers first (they reference the functions).
|
||||
DROP TRIGGER IF EXISTS tailnet_notify_agent_change ON tailnet_agents;
|
||||
DROP TRIGGER IF EXISTS tailnet_notify_client_change ON tailnet_clients;
|
||||
DROP TRIGGER IF EXISTS tailnet_notify_client_subscription_change ON tailnet_client_subscriptions;
|
||||
|
||||
-- Drop the trigger functions.
|
||||
DROP FUNCTION IF EXISTS tailnet_notify_agent_change();
|
||||
DROP FUNCTION IF EXISTS tailnet_notify_client_change();
|
||||
DROP FUNCTION IF EXISTS tailnet_notify_client_subscription_change();
|
||||
|
||||
-- Drop the tables. Foreign keys and indexes are dropped automatically via CASCADE.
|
||||
-- Order matters due to potential foreign key relationships.
|
||||
DROP TABLE IF EXISTS tailnet_client_subscriptions;
|
||||
DROP TABLE IF EXISTS tailnet_agents;
|
||||
DROP TABLE IF EXISTS tailnet_clients;
|
||||
@@ -0,0 +1,8 @@
|
||||
-- Restore the original telemetry_locks event_type constraint.
|
||||
ALTER TABLE telemetry_locks DROP CONSTRAINT telemetry_lock_event_type_constraint;
|
||||
ALTER TABLE telemetry_locks ADD CONSTRAINT telemetry_lock_event_type_constraint
|
||||
CHECK (event_type IN ('aibridge_interceptions_summary'));
|
||||
|
||||
DROP TABLE boundary_usage_stats;
|
||||
|
||||
-- No-op for boundary_usage scopes: keep enum values to avoid dependency churn.
|
||||
@@ -0,0 +1,29 @@
|
||||
CREATE TABLE boundary_usage_stats (
|
||||
replica_id UUID PRIMARY KEY,
|
||||
unique_workspaces_count BIGINT NOT NULL DEFAULT 0,
|
||||
unique_users_count BIGINT NOT NULL DEFAULT 0,
|
||||
allowed_requests BIGINT NOT NULL DEFAULT 0,
|
||||
denied_requests BIGINT NOT NULL DEFAULT 0,
|
||||
window_start TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
COMMENT ON TABLE boundary_usage_stats IS 'Per-replica boundary usage statistics for telemetry aggregation.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.replica_id IS 'The unique identifier of the replica reporting stats.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.unique_workspaces_count IS 'Count of unique workspaces that used boundary on this replica.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.unique_users_count IS 'Count of unique users that used boundary on this replica.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.allowed_requests IS 'Total allowed requests through boundary on this replica.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.denied_requests IS 'Total denied requests through boundary on this replica.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.window_start IS 'Start of the time window for these stats, set on first flush after reset.';
|
||||
COMMENT ON COLUMN boundary_usage_stats.updated_at IS 'Timestamp of the last update to this row.';
|
||||
|
||||
-- Add boundary_usage_summary to the telemetry_locks event_type constraint.
|
||||
ALTER TABLE telemetry_locks DROP CONSTRAINT telemetry_lock_event_type_constraint;
|
||||
ALTER TABLE telemetry_locks ADD CONSTRAINT telemetry_lock_event_type_constraint
|
||||
CHECK (event_type IN ('aibridge_interceptions_summary', 'boundary_usage_summary'));
|
||||
|
||||
-- Add boundary_usage scopes for RBAC.
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'boundary_usage:*';
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'boundary_usage:delete';
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'boundary_usage:read';
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'boundary_usage:update';
|
||||
@@ -0,0 +1,10 @@
|
||||
-- Revert tailnet tables to LOGGED (standard WAL-enabled tables).
|
||||
-- WARNING: This requires a full table rewrite with WAL generation,
|
||||
-- which can be slow for large tables.
|
||||
|
||||
-- Convert parent table first (before children, reverse of up migration).
|
||||
ALTER TABLE tailnet_coordinators SET LOGGED;
|
||||
|
||||
-- Convert child tables after parent.
|
||||
ALTER TABLE tailnet_peers SET LOGGED;
|
||||
ALTER TABLE tailnet_tunnels SET LOGGED;
|
||||
@@ -0,0 +1,20 @@
|
||||
-- Convert all tailnet coordination tables to UNLOGGED for improved write performance.
|
||||
-- These tables contain ephemeral coordination data that can be safely reconstructed
|
||||
-- after a crash. UNLOGGED tables skip WAL writes, significantly improving performance
|
||||
-- for high-frequency updates like coordinator heartbeats and peer state changes.
|
||||
--
|
||||
-- IMPORTANT: UNLOGGED tables are truncated on crash recovery and are not replicated
|
||||
-- to standby servers. This is acceptable because:
|
||||
-- 1. Coordinators re-register on startup
|
||||
-- 2. Peers re-establish connections on reconnect
|
||||
-- 3. Tunnels are re-created based on current peer state
|
||||
|
||||
-- Convert child tables first (they have FK references to tailnet_coordinators).
|
||||
-- UNLOGGED child tables can reference LOGGED parent tables, but LOGGED child
|
||||
-- tables cannot reference UNLOGGED parent tables. So we must convert children
|
||||
-- before converting the parent.
|
||||
ALTER TABLE tailnet_tunnels SET UNLOGGED;
|
||||
ALTER TABLE tailnet_peers SET UNLOGGED;
|
||||
|
||||
-- Convert parent table last (after all children are unlogged).
|
||||
ALTER TABLE tailnet_coordinators SET UNLOGGED;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE workspace_agent_devcontainers
|
||||
DROP COLUMN subagent_id;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE workspace_agent_devcontainers
|
||||
ADD COLUMN subagent_id UUID REFERENCES workspace_agents(id) ON DELETE CASCADE;
|
||||
+2
@@ -0,0 +1,2 @@
|
||||
INSERT INTO boundary_usage_stats (replica_id, unique_workspaces_count, unique_users_count, allowed_requests, denied_requests, window_start, updated_at)
|
||||
VALUES ('00000000-0000-0000-0000-000000000001', 10, 5, 100, 20, NOW(), NOW());
|
||||
+33
-23
@@ -213,6 +213,10 @@ const (
|
||||
ApiKeyScopeTask APIKeyScope = "task:*"
|
||||
ApiKeyScopeWorkspaceShare APIKeyScope = "workspace:share"
|
||||
ApiKeyScopeWorkspaceDormantShare APIKeyScope = "workspace_dormant:share"
|
||||
ApiKeyScopeBoundaryUsage APIKeyScope = "boundary_usage:*"
|
||||
ApiKeyScopeBoundaryUsageDelete APIKeyScope = "boundary_usage:delete"
|
||||
ApiKeyScopeBoundaryUsageRead APIKeyScope = "boundary_usage:read"
|
||||
ApiKeyScopeBoundaryUsageUpdate APIKeyScope = "boundary_usage:update"
|
||||
)
|
||||
|
||||
func (e *APIKeyScope) Scan(src interface{}) error {
|
||||
@@ -445,7 +449,11 @@ func (e APIKeyScope) Valid() bool {
|
||||
ApiKeyScopeTaskDelete,
|
||||
ApiKeyScopeTask,
|
||||
ApiKeyScopeWorkspaceShare,
|
||||
ApiKeyScopeWorkspaceDormantShare:
|
||||
ApiKeyScopeWorkspaceDormantShare,
|
||||
ApiKeyScopeBoundaryUsage,
|
||||
ApiKeyScopeBoundaryUsageDelete,
|
||||
ApiKeyScopeBoundaryUsageRead,
|
||||
ApiKeyScopeBoundaryUsageUpdate:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -647,6 +655,10 @@ func AllAPIKeyScopeValues() []APIKeyScope {
|
||||
ApiKeyScopeTask,
|
||||
ApiKeyScopeWorkspaceShare,
|
||||
ApiKeyScopeWorkspaceDormantShare,
|
||||
ApiKeyScopeBoundaryUsage,
|
||||
ApiKeyScopeBoundaryUsageDelete,
|
||||
ApiKeyScopeBoundaryUsageRead,
|
||||
ApiKeyScopeBoundaryUsageUpdate,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3702,6 +3714,24 @@ type AuditLog struct {
|
||||
ResourceIcon string `db:"resource_icon" json:"resource_icon"`
|
||||
}
|
||||
|
||||
// Per-replica boundary usage statistics for telemetry aggregation.
|
||||
type BoundaryUsageStat struct {
|
||||
// The unique identifier of the replica reporting stats.
|
||||
ReplicaID uuid.UUID `db:"replica_id" json:"replica_id"`
|
||||
// Count of unique workspaces that used boundary on this replica.
|
||||
UniqueWorkspacesCount int64 `db:"unique_workspaces_count" json:"unique_workspaces_count"`
|
||||
// Count of unique users that used boundary on this replica.
|
||||
UniqueUsersCount int64 `db:"unique_users_count" json:"unique_users_count"`
|
||||
// Total allowed requests through boundary on this replica.
|
||||
AllowedRequests int64 `db:"allowed_requests" json:"allowed_requests"`
|
||||
// Total denied requests through boundary on this replica.
|
||||
DeniedRequests int64 `db:"denied_requests" json:"denied_requests"`
|
||||
// Start of the time window for these stats, set on first flush after reset.
|
||||
WindowStart time.Time `db:"window_start" json:"window_start"`
|
||||
// Timestamp of the last update to this row.
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
}
|
||||
|
||||
type ConnectionLog struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
ConnectTime time.Time `db:"connect_time" json:"connect_time"`
|
||||
@@ -4178,27 +4208,6 @@ type SiteConfig struct {
|
||||
Value string `db:"value" json:"value"`
|
||||
}
|
||||
|
||||
type TailnetAgent struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
Node json.RawMessage `db:"node" json:"node"`
|
||||
}
|
||||
|
||||
type TailnetClient struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
Node json.RawMessage `db:"node" json:"node"`
|
||||
}
|
||||
|
||||
type TailnetClientSubscription struct {
|
||||
ClientID uuid.UUID `db:"client_id" json:"client_id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
}
|
||||
|
||||
// We keep this separate from replicas in case we need to break the coordinator out into its own service
|
||||
type TailnetCoordinator struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
@@ -4762,7 +4771,8 @@ type WorkspaceAgentDevcontainer struct {
|
||||
// Path to devcontainer.json.
|
||||
ConfigPath string `db:"config_path" json:"config_path"`
|
||||
// The name of the Dev Container.
|
||||
Name string `db:"name" json:"name"`
|
||||
Name string `db:"name" json:"name"`
|
||||
SubagentID uuid.NullUUID `db:"subagent_id" json:"subagent_id"`
|
||||
}
|
||||
|
||||
type WorkspaceAgentLog struct {
|
||||
|
||||
+16
-11
@@ -56,6 +56,7 @@ type sqlcQuerier interface {
|
||||
// Only unused template versions will be archived, which are any versions not
|
||||
// referenced by the latest build of a workspace.
|
||||
ArchiveUnusedTemplateVersions(ctx context.Context, arg ArchiveUnusedTemplateVersionsParams) ([]uuid.UUID, error)
|
||||
BatchUpdateWorkspaceAgentMetadata(ctx context.Context, arg BatchUpdateWorkspaceAgentMetadataParams) error
|
||||
BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg BatchUpdateWorkspaceLastUsedAtParams) error
|
||||
BatchUpdateWorkspaceNextStartAt(ctx context.Context, arg BatchUpdateWorkspaceNextStartAtParams) error
|
||||
BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error)
|
||||
@@ -80,7 +81,6 @@ type sqlcQuerier interface {
|
||||
CustomRoles(ctx context.Context, arg CustomRolesParams) ([]CustomRole, error)
|
||||
DeleteAPIKeyByID(ctx context.Context, id string) error
|
||||
DeleteAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error
|
||||
DeleteAllTailnetClientSubscriptions(ctx context.Context, arg DeleteAllTailnetClientSubscriptionsParams) error
|
||||
DeleteAllTailnetTunnels(ctx context.Context, arg DeleteAllTailnetTunnelsParams) error
|
||||
// Deletes all existing webpush subscriptions.
|
||||
// This should be called when the VAPID keypair is regenerated, as the old
|
||||
@@ -88,7 +88,8 @@ type sqlcQuerier interface {
|
||||
// be recreated.
|
||||
DeleteAllWebpushSubscriptions(ctx context.Context) error
|
||||
DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error
|
||||
DeleteCoordinator(ctx context.Context, id uuid.UUID) error
|
||||
// Deletes boundary usage statistics for a specific replica.
|
||||
DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error
|
||||
DeleteCryptoKey(ctx context.Context, arg DeleteCryptoKeyParams) (CryptoKey, error)
|
||||
DeleteCustomRole(ctx context.Context, arg DeleteCustomRoleParams) error
|
||||
DeleteExpiredAPIKeys(ctx context.Context, arg DeleteExpiredAPIKeysParams) (int64, error)
|
||||
@@ -129,9 +130,6 @@ type sqlcQuerier interface {
|
||||
DeleteProvisionerKey(ctx context.Context, id uuid.UUID) error
|
||||
DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error
|
||||
DeleteRuntimeConfig(ctx context.Context, key string) error
|
||||
DeleteTailnetAgent(ctx context.Context, arg DeleteTailnetAgentParams) (DeleteTailnetAgentRow, error)
|
||||
DeleteTailnetClient(ctx context.Context, arg DeleteTailnetClientParams) (DeleteTailnetClientRow, error)
|
||||
DeleteTailnetClientSubscription(ctx context.Context, arg DeleteTailnetClientSubscriptionParams) error
|
||||
DeleteTailnetPeer(ctx context.Context, arg DeleteTailnetPeerParams) (DeleteTailnetPeerRow, error)
|
||||
DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetTunnelParams) (DeleteTailnetTunnelRow, error)
|
||||
DeleteTask(ctx context.Context, arg DeleteTaskParams) (TaskTable, error)
|
||||
@@ -179,7 +177,6 @@ type sqlcQuerier interface {
|
||||
GetActivePresetPrebuildSchedules(ctx context.Context) ([]TemplateVersionPresetPrebuildSchedule, error)
|
||||
GetActiveUserCount(ctx context.Context, includeSystem bool) (int64, error)
|
||||
GetActiveWorkspaceBuildsByTemplateID(ctx context.Context, templateID uuid.UUID) ([]WorkspaceBuild, error)
|
||||
GetAllTailnetAgents(ctx context.Context) ([]TailnetAgent, error)
|
||||
// For PG Coordinator HTMLDebug
|
||||
GetAllTailnetCoordinators(ctx context.Context) ([]TailnetCoordinator, error)
|
||||
GetAllTailnetPeers(ctx context.Context) ([]TailnetPeer, error)
|
||||
@@ -199,6 +196,10 @@ type sqlcQuerier interface {
|
||||
// This function returns roles for authorization purposes. Implied member roles
|
||||
// are included.
|
||||
GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error)
|
||||
// Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
// include data where window_start is within the given interval to exclude
|
||||
// stale data.
|
||||
GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetBoundaryUsageSummaryRow, error)
|
||||
GetConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams) ([]GetConnectionLogsOffsetRow, error)
|
||||
GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error)
|
||||
GetCryptoKeyByFeatureAndSequence(ctx context.Context, arg GetCryptoKeyByFeatureAndSequenceParams) (CryptoKey, error)
|
||||
@@ -352,14 +353,13 @@ type sqlcQuerier interface {
|
||||
GetReplicasUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]Replica, error)
|
||||
GetRunningPrebuiltWorkspaces(ctx context.Context) ([]GetRunningPrebuiltWorkspacesRow, error)
|
||||
GetRuntimeConfig(ctx context.Context, key string) (string, error)
|
||||
GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]TailnetAgent, error)
|
||||
GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]TailnetClient, error)
|
||||
GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]TailnetPeer, error)
|
||||
GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerBindingsRow, error)
|
||||
GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerIDsRow, error)
|
||||
GetTaskByID(ctx context.Context, id uuid.UUID) (Task, error)
|
||||
GetTaskByOwnerIDAndName(ctx context.Context, arg GetTaskByOwnerIDAndNameParams) (Task, error)
|
||||
GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (Task, error)
|
||||
GetTaskSnapshot(ctx context.Context, taskID uuid.UUID) (TaskSnapshot, error)
|
||||
GetTelemetryItem(ctx context.Context, key string) (TelemetryItem, error)
|
||||
GetTelemetryItems(ctx context.Context) ([]TelemetryItem, error)
|
||||
// GetTemplateAppInsights returns the aggregate usage of each app in a given
|
||||
@@ -652,6 +652,9 @@ type sqlcQuerier interface {
|
||||
RegisterWorkspaceProxy(ctx context.Context, arg RegisterWorkspaceProxyParams) (WorkspaceProxy, error)
|
||||
RemoveUserFromAllGroups(ctx context.Context, userID uuid.UUID) error
|
||||
RemoveUserFromGroups(ctx context.Context, arg RemoveUserFromGroupsParams) ([]uuid.UUID, error)
|
||||
// Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
// aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
ResetBoundaryUsageStats(ctx context.Context) error
|
||||
RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error
|
||||
// Note that this selects from the CTE, not the original table. The CTE is named
|
||||
// the same as the original table to trick sqlc into reusing the existing struct
|
||||
@@ -759,6 +762,10 @@ type sqlcQuerier interface {
|
||||
UpsertAnnouncementBanners(ctx context.Context, value string) error
|
||||
UpsertAppSecurityKey(ctx context.Context, value string) error
|
||||
UpsertApplicationName(ctx context.Context, value string) error
|
||||
// Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
// the current in-memory state. Returns true if this was an insert (new period),
|
||||
// false if update.
|
||||
UpsertBoundaryUsageStats(ctx context.Context, arg UpsertBoundaryUsageStatsParams) (bool, error)
|
||||
UpsertConnectionLog(ctx context.Context, arg UpsertConnectionLogParams) (ConnectionLog, error)
|
||||
UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error
|
||||
// The default proxy is implied and not actually stored in the database.
|
||||
@@ -776,12 +783,10 @@ type sqlcQuerier interface {
|
||||
UpsertPrebuildsSettings(ctx context.Context, value string) error
|
||||
UpsertProvisionerDaemon(ctx context.Context, arg UpsertProvisionerDaemonParams) (ProvisionerDaemon, error)
|
||||
UpsertRuntimeConfig(ctx context.Context, arg UpsertRuntimeConfigParams) error
|
||||
UpsertTailnetAgent(ctx context.Context, arg UpsertTailnetAgentParams) (TailnetAgent, error)
|
||||
UpsertTailnetClient(ctx context.Context, arg UpsertTailnetClientParams) (TailnetClient, error)
|
||||
UpsertTailnetClientSubscription(ctx context.Context, arg UpsertTailnetClientSubscriptionParams) error
|
||||
UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (TailnetCoordinator, error)
|
||||
UpsertTailnetPeer(ctx context.Context, arg UpsertTailnetPeerParams) (TailnetPeer, error)
|
||||
UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetTunnelParams) (TailnetTunnel, error)
|
||||
UpsertTaskSnapshot(ctx context.Context, arg UpsertTaskSnapshotParams) error
|
||||
UpsertTaskWorkspaceApp(ctx context.Context, arg UpsertTaskWorkspaceAppParams) (TaskWorkspaceApp, error)
|
||||
UpsertTelemetryItem(ctx context.Context, arg UpsertTelemetryItemParams) error
|
||||
// This query aggregates the workspace_agent_stats and workspace_app_stats data
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -8482,3 +8484,103 @@ func TestGetAuthenticatedWorkspaceAgentAndBuildByAuthToken_ShutdownScripts(t *te
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "agent should not authenticate when latest build is not STOP")
|
||||
})
|
||||
}
|
||||
|
||||
// Our `InsertWorkspaceAgentDevcontainers` query should ideally be `[]uuid.NullUUID` but unfortunately
|
||||
// sqlc infers it as `[]uuid.UUID`. To ensure we don't insert a `uuid.Nil`, the query inserts NULL when
|
||||
// passed with `uuid.Nil`. This test ensures we keep this behavior without regression.
|
||||
func TestInsertWorkspaceAgentDevcontainers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
validSubagent []bool
|
||||
}{
|
||||
{"BothValid", []bool{true, true}},
|
||||
{"FirstValidSecondInvalid", []bool{true, false}},
|
||||
{"FirstInvalidSecondValid", []bool{false, true}},
|
||||
{"BothInvalid", []bool{false, false}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
db, _ = dbtestutil.NewDB(t)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
job = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
|
||||
Type: database.ProvisionerJobTypeTemplateVersionImport,
|
||||
OrganizationID: org.ID,
|
||||
})
|
||||
resource = dbgen.WorkspaceResource(t, db, database.WorkspaceResource{JobID: job.ID})
|
||||
agent = dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ResourceID: resource.ID})
|
||||
)
|
||||
|
||||
ids := make([]uuid.UUID, len(tc.validSubagent))
|
||||
names := make([]string, len(tc.validSubagent))
|
||||
workspaceFolders := make([]string, len(tc.validSubagent))
|
||||
configPaths := make([]string, len(tc.validSubagent))
|
||||
subagentIDs := make([]uuid.UUID, len(tc.validSubagent))
|
||||
|
||||
for i, valid := range tc.validSubagent {
|
||||
ids[i] = uuid.New()
|
||||
names[i] = fmt.Sprintf("test-devcontainer-%d", i)
|
||||
workspaceFolders[i] = fmt.Sprintf("/workspace%d", i)
|
||||
configPaths[i] = fmt.Sprintf("/workspace%d/.devcontainer/devcontainer.json", i)
|
||||
|
||||
if valid {
|
||||
subagentIDs[i] = dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ResourceID: resource.ID,
|
||||
ParentID: uuid.NullUUID{UUID: agent.ID, Valid: true},
|
||||
}).ID
|
||||
} else {
|
||||
subagentIDs[i] = uuid.Nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
// Given: We insert multiple devcontainer records.
|
||||
devcontainers, err := db.InsertWorkspaceAgentDevcontainers(ctx, database.InsertWorkspaceAgentDevcontainersParams{
|
||||
WorkspaceAgentID: agent.ID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
ID: ids,
|
||||
Name: names,
|
||||
WorkspaceFolder: workspaceFolders,
|
||||
ConfigPath: configPaths,
|
||||
SubagentID: subagentIDs,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, devcontainers, len(tc.validSubagent))
|
||||
|
||||
// Then: Verify each devcontainer has the correct SubagentID validity.
|
||||
// - When we pass `uuid.Nil`, we get a `uuid.NullUUID{Valid: false}`
|
||||
// - When we pass a valid UUID, we get a `uuid.NullUUID{Valid: true}`
|
||||
for i, valid := range tc.validSubagent {
|
||||
require.Equal(t, valid, devcontainers[i].SubagentID.Valid, "devcontainer %d: subagent_id validity mismatch", i)
|
||||
if valid {
|
||||
require.Equal(t, subagentIDs[i], devcontainers[i].SubagentID.UUID, "devcontainer %d: subagent_id UUID mismatch", i)
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the same check on data returned by
|
||||
// `GetWorkspaceAgentDevcontainersByAgentID` to ensure the fix is at
|
||||
// the data storage layer, instead of just at a query level.
|
||||
fetched, err := db.GetWorkspaceAgentDevcontainersByAgentID(ctx, agent.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, fetched, len(tc.validSubagent))
|
||||
|
||||
// Sort fetched by name to ensure consistent ordering for comparison.
|
||||
slices.SortFunc(fetched, func(a, b database.WorkspaceAgentDevcontainer) int {
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
|
||||
for i, valid := range tc.validSubagent {
|
||||
require.Equal(t, valid, fetched[i].SubagentID.Valid, "fetched devcontainer %d: subagent_id validity mismatch", i)
|
||||
if valid {
|
||||
require.Equal(t, subagentIDs[i], fetched[i].SubagentID.UUID, "fetched devcontainer %d: subagent_id UUID mismatch", i)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
+192
-304
@@ -1980,6 +1980,109 @@ func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParam
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteBoundaryUsageStatsByReplicaID = `-- name: DeleteBoundaryUsageStatsByReplicaID :exec
|
||||
DELETE FROM boundary_usage_stats WHERE replica_id = $1
|
||||
`
|
||||
|
||||
// Deletes boundary usage statistics for a specific replica.
|
||||
func (q *sqlQuerier) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteBoundaryUsageStatsByReplicaID, replicaID)
|
||||
return err
|
||||
}
|
||||
|
||||
const getBoundaryUsageSummary = `-- name: GetBoundaryUsageSummary :one
|
||||
SELECT
|
||||
COALESCE(SUM(unique_workspaces_count), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests), 0)::bigint AS denied_requests
|
||||
FROM boundary_usage_stats
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
`
|
||||
|
||||
type GetBoundaryUsageSummaryRow struct {
|
||||
UniqueWorkspaces int64 `db:"unique_workspaces" json:"unique_workspaces"`
|
||||
UniqueUsers int64 `db:"unique_users" json:"unique_users"`
|
||||
AllowedRequests int64 `db:"allowed_requests" json:"allowed_requests"`
|
||||
DeniedRequests int64 `db:"denied_requests" json:"denied_requests"`
|
||||
}
|
||||
|
||||
// Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
// include data where window_start is within the given interval to exclude
|
||||
// stale data.
|
||||
func (q *sqlQuerier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetBoundaryUsageSummaryRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, getBoundaryUsageSummary, maxStalenessMs)
|
||||
var i GetBoundaryUsageSummaryRow
|
||||
err := row.Scan(
|
||||
&i.UniqueWorkspaces,
|
||||
&i.UniqueUsers,
|
||||
&i.AllowedRequests,
|
||||
&i.DeniedRequests,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const resetBoundaryUsageStats = `-- name: ResetBoundaryUsageStats :exec
|
||||
DELETE FROM boundary_usage_stats
|
||||
`
|
||||
|
||||
// Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
// aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
func (q *sqlQuerier) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
_, err := q.db.ExecContext(ctx, resetBoundaryUsageStats)
|
||||
return err
|
||||
}
|
||||
|
||||
const upsertBoundaryUsageStats = `-- name: UpsertBoundaryUsageStats :one
|
||||
INSERT INTO boundary_usage_stats (
|
||||
replica_id,
|
||||
unique_workspaces_count,
|
||||
unique_users_count,
|
||||
allowed_requests,
|
||||
denied_requests,
|
||||
window_start,
|
||||
updated_at
|
||||
) VALUES (
|
||||
$1,
|
||||
$2,
|
||||
$3,
|
||||
$4,
|
||||
$5,
|
||||
NOW(),
|
||||
NOW()
|
||||
) ON CONFLICT (replica_id) DO UPDATE SET
|
||||
unique_workspaces_count = EXCLUDED.unique_workspaces_count,
|
||||
unique_users_count = EXCLUDED.unique_users_count,
|
||||
allowed_requests = EXCLUDED.allowed_requests,
|
||||
denied_requests = EXCLUDED.denied_requests,
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS new_period
|
||||
`
|
||||
|
||||
type UpsertBoundaryUsageStatsParams struct {
|
||||
ReplicaID uuid.UUID `db:"replica_id" json:"replica_id"`
|
||||
UniqueWorkspacesCount int64 `db:"unique_workspaces_count" json:"unique_workspaces_count"`
|
||||
UniqueUsersCount int64 `db:"unique_users_count" json:"unique_users_count"`
|
||||
AllowedRequests int64 `db:"allowed_requests" json:"allowed_requests"`
|
||||
DeniedRequests int64 `db:"denied_requests" json:"denied_requests"`
|
||||
}
|
||||
|
||||
// Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
// the current in-memory state. Returns true if this was an insert (new period),
|
||||
// false if update.
|
||||
func (q *sqlQuerier) UpsertBoundaryUsageStats(ctx context.Context, arg UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
row := q.db.QueryRowContext(ctx, upsertBoundaryUsageStats,
|
||||
arg.ReplicaID,
|
||||
arg.UniqueWorkspacesCount,
|
||||
arg.UniqueUsersCount,
|
||||
arg.AllowedRequests,
|
||||
arg.DeniedRequests,
|
||||
)
|
||||
var new_period bool
|
||||
err := row.Scan(&new_period)
|
||||
return new_period, err
|
||||
}
|
||||
|
||||
const countConnectionLogs = `-- name: CountConnectionLogs :one
|
||||
SELECT
|
||||
COUNT(*) AS count
|
||||
@@ -12634,22 +12737,6 @@ func (q *sqlQuerier) CleanTailnetTunnels(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteAllTailnetClientSubscriptions = `-- name: DeleteAllTailnetClientSubscriptions :exec
|
||||
DELETE
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE client_id = $1 and coordinator_id = $2
|
||||
`
|
||||
|
||||
type DeleteAllTailnetClientSubscriptionsParams struct {
|
||||
ClientID uuid.UUID `db:"client_id" json:"client_id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg DeleteAllTailnetClientSubscriptionsParams) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteAllTailnetClientSubscriptions, arg.ClientID, arg.CoordinatorID)
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteAllTailnetTunnels = `-- name: DeleteAllTailnetTunnels :exec
|
||||
DELETE
|
||||
FROM tailnet_tunnels
|
||||
@@ -12666,82 +12753,6 @@ func (q *sqlQuerier) DeleteAllTailnetTunnels(ctx context.Context, arg DeleteAllT
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteCoordinator = `-- name: DeleteCoordinator :exec
|
||||
DELETE
|
||||
FROM tailnet_coordinators
|
||||
WHERE id = $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteCoordinator, id)
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteTailnetAgent = `-- name: DeleteTailnetAgent :one
|
||||
DELETE
|
||||
FROM tailnet_agents
|
||||
WHERE id = $1 and coordinator_id = $2
|
||||
RETURNING id, coordinator_id
|
||||
`
|
||||
|
||||
type DeleteTailnetAgentParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
type DeleteTailnetAgentRow struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) DeleteTailnetAgent(ctx context.Context, arg DeleteTailnetAgentParams) (DeleteTailnetAgentRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, deleteTailnetAgent, arg.ID, arg.CoordinatorID)
|
||||
var i DeleteTailnetAgentRow
|
||||
err := row.Scan(&i.ID, &i.CoordinatorID)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteTailnetClient = `-- name: DeleteTailnetClient :one
|
||||
DELETE
|
||||
FROM tailnet_clients
|
||||
WHERE id = $1 and coordinator_id = $2
|
||||
RETURNING id, coordinator_id
|
||||
`
|
||||
|
||||
type DeleteTailnetClientParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
type DeleteTailnetClientRow struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) DeleteTailnetClient(ctx context.Context, arg DeleteTailnetClientParams) (DeleteTailnetClientRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, deleteTailnetClient, arg.ID, arg.CoordinatorID)
|
||||
var i DeleteTailnetClientRow
|
||||
err := row.Scan(&i.ID, &i.CoordinatorID)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteTailnetClientSubscription = `-- name: DeleteTailnetClientSubscription :exec
|
||||
DELETE
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE client_id = $1 and agent_id = $2 and coordinator_id = $3
|
||||
`
|
||||
|
||||
type DeleteTailnetClientSubscriptionParams struct {
|
||||
ClientID uuid.UUID `db:"client_id" json:"client_id"`
|
||||
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) DeleteTailnetClientSubscription(ctx context.Context, arg DeleteTailnetClientSubscriptionParams) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteTailnetClientSubscription, arg.ClientID, arg.AgentID, arg.CoordinatorID)
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteTailnetPeer = `-- name: DeleteTailnetPeer :one
|
||||
DELETE
|
||||
FROM tailnet_peers
|
||||
@@ -12792,39 +12803,6 @@ func (q *sqlQuerier) DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetT
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getAllTailnetAgents = `-- name: GetAllTailnetAgents :many
|
||||
SELECT id, coordinator_id, updated_at, node
|
||||
FROM tailnet_agents
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetAllTailnetAgents(ctx context.Context) ([]TailnetAgent, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getAllTailnetAgents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []TailnetAgent
|
||||
for rows.Next() {
|
||||
var i TailnetAgent
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.CoordinatorID,
|
||||
&i.UpdatedAt,
|
||||
&i.Node,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getAllTailnetCoordinators = `-- name: GetAllTailnetCoordinators :many
|
||||
|
||||
SELECT id, heartbeat_at FROM tailnet_coordinators
|
||||
@@ -12919,78 +12897,6 @@ func (q *sqlQuerier) GetAllTailnetTunnels(ctx context.Context) ([]TailnetTunnel,
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTailnetAgents = `-- name: GetTailnetAgents :many
|
||||
SELECT id, coordinator_id, updated_at, node
|
||||
FROM tailnet_agents
|
||||
WHERE id = $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]TailnetAgent, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getTailnetAgents, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []TailnetAgent
|
||||
for rows.Next() {
|
||||
var i TailnetAgent
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.CoordinatorID,
|
||||
&i.UpdatedAt,
|
||||
&i.Node,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTailnetClientsForAgent = `-- name: GetTailnetClientsForAgent :many
|
||||
SELECT id, coordinator_id, updated_at, node
|
||||
FROM tailnet_clients
|
||||
WHERE id IN (
|
||||
SELECT tailnet_client_subscriptions.client_id
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE tailnet_client_subscriptions.agent_id = $1
|
||||
)
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]TailnetClient, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getTailnetClientsForAgent, agentID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []TailnetClient
|
||||
for rows.Next() {
|
||||
var i TailnetClient
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.CoordinatorID,
|
||||
&i.UpdatedAt,
|
||||
&i.Node,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTailnetPeers = `-- name: GetTailnetPeers :many
|
||||
SELECT id, coordinator_id, updated_at, node, status FROM tailnet_peers WHERE id = $1
|
||||
`
|
||||
@@ -13133,109 +13039,6 @@ func (q *sqlQuerier) UpdateTailnetPeerStatusByCoordinator(ctx context.Context, a
|
||||
return err
|
||||
}
|
||||
|
||||
const upsertTailnetAgent = `-- name: UpsertTailnetAgent :one
|
||||
INSERT INTO
|
||||
tailnet_agents (
|
||||
id,
|
||||
coordinator_id,
|
||||
node,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (id, coordinator_id)
|
||||
DO UPDATE SET
|
||||
id = $1,
|
||||
coordinator_id = $2,
|
||||
node = $3,
|
||||
updated_at = now() at time zone 'utc'
|
||||
RETURNING id, coordinator_id, updated_at, node
|
||||
`
|
||||
|
||||
type UpsertTailnetAgentParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
Node json.RawMessage `db:"node" json:"node"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpsertTailnetAgent(ctx context.Context, arg UpsertTailnetAgentParams) (TailnetAgent, error) {
|
||||
row := q.db.QueryRowContext(ctx, upsertTailnetAgent, arg.ID, arg.CoordinatorID, arg.Node)
|
||||
var i TailnetAgent
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.CoordinatorID,
|
||||
&i.UpdatedAt,
|
||||
&i.Node,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const upsertTailnetClient = `-- name: UpsertTailnetClient :one
|
||||
INSERT INTO
|
||||
tailnet_clients (
|
||||
id,
|
||||
coordinator_id,
|
||||
node,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (id, coordinator_id)
|
||||
DO UPDATE SET
|
||||
id = $1,
|
||||
coordinator_id = $2,
|
||||
node = $3,
|
||||
updated_at = now() at time zone 'utc'
|
||||
RETURNING id, coordinator_id, updated_at, node
|
||||
`
|
||||
|
||||
type UpsertTailnetClientParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
Node json.RawMessage `db:"node" json:"node"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpsertTailnetClient(ctx context.Context, arg UpsertTailnetClientParams) (TailnetClient, error) {
|
||||
row := q.db.QueryRowContext(ctx, upsertTailnetClient, arg.ID, arg.CoordinatorID, arg.Node)
|
||||
var i TailnetClient
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.CoordinatorID,
|
||||
&i.UpdatedAt,
|
||||
&i.Node,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const upsertTailnetClientSubscription = `-- name: UpsertTailnetClientSubscription :exec
|
||||
INSERT INTO
|
||||
tailnet_client_subscriptions (
|
||||
client_id,
|
||||
coordinator_id,
|
||||
agent_id,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (client_id, coordinator_id, agent_id)
|
||||
DO UPDATE SET
|
||||
client_id = $1,
|
||||
coordinator_id = $2,
|
||||
agent_id = $3,
|
||||
updated_at = now() at time zone 'utc'
|
||||
`
|
||||
|
||||
type UpsertTailnetClientSubscriptionParams struct {
|
||||
ClientID uuid.UUID `db:"client_id" json:"client_id"`
|
||||
CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
|
||||
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpsertTailnetClientSubscription(ctx context.Context, arg UpsertTailnetClientSubscriptionParams) error {
|
||||
_, err := q.db.ExecContext(ctx, upsertTailnetClientSubscription, arg.ClientID, arg.CoordinatorID, arg.AgentID)
|
||||
return err
|
||||
}
|
||||
|
||||
const upsertTailnetCoordinator = `-- name: UpsertTailnetCoordinator :one
|
||||
INSERT INTO
|
||||
tailnet_coordinators (
|
||||
@@ -13483,6 +13286,22 @@ func (q *sqlQuerier) GetTaskByWorkspaceID(ctx context.Context, workspaceID uuid.
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTaskSnapshot = `-- name: GetTaskSnapshot :one
|
||||
SELECT
|
||||
task_id, log_snapshot, log_snapshot_created_at
|
||||
FROM
|
||||
task_snapshots
|
||||
WHERE
|
||||
task_id = $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetTaskSnapshot(ctx context.Context, taskID uuid.UUID) (TaskSnapshot, error) {
|
||||
row := q.db.QueryRowContext(ctx, getTaskSnapshot, taskID)
|
||||
var i TaskSnapshot
|
||||
err := row.Scan(&i.TaskID, &i.LogSnapshot, &i.LogSnapshotCreatedAt)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const insertTask = `-- name: InsertTask :one
|
||||
INSERT INTO tasks
|
||||
(id, organization_id, owner_id, name, display_name, workspace_id, template_version_id, template_parameters, prompt, created_at)
|
||||
@@ -13673,6 +13492,29 @@ func (q *sqlQuerier) UpdateTaskWorkspaceID(ctx context.Context, arg UpdateTaskWo
|
||||
return i, err
|
||||
}
|
||||
|
||||
const upsertTaskSnapshot = `-- name: UpsertTaskSnapshot :exec
|
||||
INSERT INTO
|
||||
task_snapshots (task_id, log_snapshot, log_snapshot_created_at)
|
||||
VALUES
|
||||
($1, $2, $3)
|
||||
ON CONFLICT
|
||||
(task_id)
|
||||
DO UPDATE SET
|
||||
log_snapshot = EXCLUDED.log_snapshot,
|
||||
log_snapshot_created_at = EXCLUDED.log_snapshot_created_at
|
||||
`
|
||||
|
||||
type UpsertTaskSnapshotParams struct {
|
||||
TaskID uuid.UUID `db:"task_id" json:"task_id"`
|
||||
LogSnapshot json.RawMessage `db:"log_snapshot" json:"log_snapshot"`
|
||||
LogSnapshotCreatedAt time.Time `db:"log_snapshot_created_at" json:"log_snapshot_created_at"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpsertTaskSnapshot(ctx context.Context, arg UpsertTaskSnapshotParams) error {
|
||||
_, err := q.db.ExecContext(ctx, upsertTaskSnapshot, arg.TaskID, arg.LogSnapshot, arg.LogSnapshotCreatedAt)
|
||||
return err
|
||||
}
|
||||
|
||||
const upsertTaskWorkspaceApp = `-- name: UpsertTaskWorkspaceApp :one
|
||||
INSERT INTO task_workspace_apps
|
||||
(task_id, workspace_build_number, workspace_agent_id, workspace_app_id)
|
||||
@@ -17371,7 +17213,7 @@ func (q *sqlQuerier) ValidateUserIDs(ctx context.Context, userIds []uuid.UUID) (
|
||||
|
||||
const getWorkspaceAgentDevcontainersByAgentID = `-- name: GetWorkspaceAgentDevcontainersByAgentID :many
|
||||
SELECT
|
||||
id, workspace_agent_id, created_at, workspace_folder, config_path, name
|
||||
id, workspace_agent_id, created_at, workspace_folder, config_path, name, subagent_id
|
||||
FROM
|
||||
workspace_agent_devcontainers
|
||||
WHERE
|
||||
@@ -17396,6 +17238,7 @@ func (q *sqlQuerier) GetWorkspaceAgentDevcontainersByAgentID(ctx context.Context
|
||||
&i.WorkspaceFolder,
|
||||
&i.ConfigPath,
|
||||
&i.Name,
|
||||
&i.SubagentID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -17412,15 +17255,16 @@ func (q *sqlQuerier) GetWorkspaceAgentDevcontainersByAgentID(ctx context.Context
|
||||
|
||||
const insertWorkspaceAgentDevcontainers = `-- name: InsertWorkspaceAgentDevcontainers :many
|
||||
INSERT INTO
|
||||
workspace_agent_devcontainers (workspace_agent_id, created_at, id, name, workspace_folder, config_path)
|
||||
workspace_agent_devcontainers (workspace_agent_id, created_at, id, name, workspace_folder, config_path, subagent_id)
|
||||
SELECT
|
||||
$1::uuid AS workspace_agent_id,
|
||||
$2::timestamptz AS created_at,
|
||||
unnest($3::uuid[]) AS id,
|
||||
unnest($4::text[]) AS name,
|
||||
unnest($5::text[]) AS workspace_folder,
|
||||
unnest($6::text[]) AS config_path
|
||||
RETURNING workspace_agent_devcontainers.id, workspace_agent_devcontainers.workspace_agent_id, workspace_agent_devcontainers.created_at, workspace_agent_devcontainers.workspace_folder, workspace_agent_devcontainers.config_path, workspace_agent_devcontainers.name
|
||||
unnest($6::text[]) AS config_path,
|
||||
NULLIF(unnest($7::uuid[]), '00000000-0000-0000-0000-000000000000')::uuid AS subagent_id
|
||||
RETURNING workspace_agent_devcontainers.id, workspace_agent_devcontainers.workspace_agent_id, workspace_agent_devcontainers.created_at, workspace_agent_devcontainers.workspace_folder, workspace_agent_devcontainers.config_path, workspace_agent_devcontainers.name, workspace_agent_devcontainers.subagent_id
|
||||
`
|
||||
|
||||
type InsertWorkspaceAgentDevcontainersParams struct {
|
||||
@@ -17430,6 +17274,7 @@ type InsertWorkspaceAgentDevcontainersParams struct {
|
||||
Name []string `db:"name" json:"name"`
|
||||
WorkspaceFolder []string `db:"workspace_folder" json:"workspace_folder"`
|
||||
ConfigPath []string `db:"config_path" json:"config_path"`
|
||||
SubagentID []uuid.UUID `db:"subagent_id" json:"subagent_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) InsertWorkspaceAgentDevcontainers(ctx context.Context, arg InsertWorkspaceAgentDevcontainersParams) ([]WorkspaceAgentDevcontainer, error) {
|
||||
@@ -17440,6 +17285,7 @@ func (q *sqlQuerier) InsertWorkspaceAgentDevcontainers(ctx context.Context, arg
|
||||
pq.Array(arg.Name),
|
||||
pq.Array(arg.WorkspaceFolder),
|
||||
pq.Array(arg.ConfigPath),
|
||||
pq.Array(arg.SubagentID),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -17455,6 +17301,7 @@ func (q *sqlQuerier) InsertWorkspaceAgentDevcontainers(ctx context.Context, arg
|
||||
&i.WorkspaceFolder,
|
||||
&i.ConfigPath,
|
||||
&i.Name,
|
||||
&i.SubagentID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -17954,6 +17801,47 @@ func (q *sqlQuerier) UpdateVolumeResourceMonitor(ctx context.Context, arg Update
|
||||
return err
|
||||
}
|
||||
|
||||
const batchUpdateWorkspaceAgentMetadata = `-- name: BatchUpdateWorkspaceAgentMetadata :exec
|
||||
WITH metadata AS (
|
||||
SELECT
|
||||
unnest($1::uuid[]) AS workspace_agent_id,
|
||||
unnest($2::text[]) AS key,
|
||||
unnest($3::text[]) AS value,
|
||||
unnest($4::text[]) AS error,
|
||||
unnest($5::timestamptz[]) AS collected_at
|
||||
)
|
||||
UPDATE
|
||||
workspace_agent_metadata wam
|
||||
SET
|
||||
value = m.value,
|
||||
error = m.error,
|
||||
collected_at = m.collected_at
|
||||
FROM
|
||||
metadata m
|
||||
WHERE
|
||||
wam.workspace_agent_id = m.workspace_agent_id
|
||||
AND wam.key = m.key
|
||||
`
|
||||
|
||||
type BatchUpdateWorkspaceAgentMetadataParams struct {
|
||||
WorkspaceAgentID []uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
|
||||
Key []string `db:"key" json:"key"`
|
||||
Value []string `db:"value" json:"value"`
|
||||
Error []string `db:"error" json:"error"`
|
||||
CollectedAt []time.Time `db:"collected_at" json:"collected_at"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) BatchUpdateWorkspaceAgentMetadata(ctx context.Context, arg BatchUpdateWorkspaceAgentMetadataParams) error {
|
||||
_, err := q.db.ExecContext(ctx, batchUpdateWorkspaceAgentMetadata,
|
||||
pq.Array(arg.WorkspaceAgentID),
|
||||
pq.Array(arg.Key),
|
||||
pq.Array(arg.Value),
|
||||
pq.Array(arg.Error),
|
||||
pq.Array(arg.CollectedAt),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :execrows
|
||||
WITH
|
||||
latest_builds AS (
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
-- name: UpsertBoundaryUsageStats :one
|
||||
-- Upserts boundary usage statistics for a replica. All values are replaced with
|
||||
-- the current in-memory state. Returns true if this was an insert (new period),
|
||||
-- false if update.
|
||||
INSERT INTO boundary_usage_stats (
|
||||
replica_id,
|
||||
unique_workspaces_count,
|
||||
unique_users_count,
|
||||
allowed_requests,
|
||||
denied_requests,
|
||||
window_start,
|
||||
updated_at
|
||||
) VALUES (
|
||||
@replica_id,
|
||||
@unique_workspaces_count,
|
||||
@unique_users_count,
|
||||
@allowed_requests,
|
||||
@denied_requests,
|
||||
NOW(),
|
||||
NOW()
|
||||
) ON CONFLICT (replica_id) DO UPDATE SET
|
||||
unique_workspaces_count = EXCLUDED.unique_workspaces_count,
|
||||
unique_users_count = EXCLUDED.unique_users_count,
|
||||
allowed_requests = EXCLUDED.allowed_requests,
|
||||
denied_requests = EXCLUDED.denied_requests,
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS new_period;
|
||||
|
||||
-- name: GetBoundaryUsageSummary :one
|
||||
-- Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
-- include data where window_start is within the given interval to exclude
|
||||
-- stale data.
|
||||
SELECT
|
||||
COALESCE(SUM(unique_workspaces_count), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests), 0)::bigint AS denied_requests
|
||||
FROM boundary_usage_stats
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval;
|
||||
|
||||
-- name: ResetBoundaryUsageStats :exec
|
||||
-- Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
-- aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
DELETE FROM boundary_usage_stats;
|
||||
|
||||
-- name: DeleteBoundaryUsageStatsByReplicaID :exec
|
||||
-- Deletes boundary usage statistics for a specific replica.
|
||||
DELETE FROM boundary_usage_stats WHERE replica_id = @replica_id;
|
||||
@@ -1,102 +1,3 @@
|
||||
-- name: UpsertTailnetClient :one
|
||||
INSERT INTO
|
||||
tailnet_clients (
|
||||
id,
|
||||
coordinator_id,
|
||||
node,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (id, coordinator_id)
|
||||
DO UPDATE SET
|
||||
id = $1,
|
||||
coordinator_id = $2,
|
||||
node = $3,
|
||||
updated_at = now() at time zone 'utc'
|
||||
RETURNING *;
|
||||
|
||||
-- name: UpsertTailnetClientSubscription :exec
|
||||
INSERT INTO
|
||||
tailnet_client_subscriptions (
|
||||
client_id,
|
||||
coordinator_id,
|
||||
agent_id,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (client_id, coordinator_id, agent_id)
|
||||
DO UPDATE SET
|
||||
client_id = $1,
|
||||
coordinator_id = $2,
|
||||
agent_id = $3,
|
||||
updated_at = now() at time zone 'utc';
|
||||
|
||||
-- name: UpsertTailnetAgent :one
|
||||
INSERT INTO
|
||||
tailnet_agents (
|
||||
id,
|
||||
coordinator_id,
|
||||
node,
|
||||
updated_at
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, now() at time zone 'utc')
|
||||
ON CONFLICT (id, coordinator_id)
|
||||
DO UPDATE SET
|
||||
id = $1,
|
||||
coordinator_id = $2,
|
||||
node = $3,
|
||||
updated_at = now() at time zone 'utc'
|
||||
RETURNING *;
|
||||
|
||||
|
||||
-- name: DeleteTailnetClient :one
|
||||
DELETE
|
||||
FROM tailnet_clients
|
||||
WHERE id = $1 and coordinator_id = $2
|
||||
RETURNING id, coordinator_id;
|
||||
|
||||
-- name: DeleteTailnetClientSubscription :exec
|
||||
DELETE
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE client_id = $1 and agent_id = $2 and coordinator_id = $3;
|
||||
|
||||
-- name: DeleteAllTailnetClientSubscriptions :exec
|
||||
DELETE
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE client_id = $1 and coordinator_id = $2;
|
||||
|
||||
-- name: DeleteTailnetAgent :one
|
||||
DELETE
|
||||
FROM tailnet_agents
|
||||
WHERE id = $1 and coordinator_id = $2
|
||||
RETURNING id, coordinator_id;
|
||||
|
||||
-- name: DeleteCoordinator :exec
|
||||
DELETE
|
||||
FROM tailnet_coordinators
|
||||
WHERE id = $1;
|
||||
|
||||
-- name: GetTailnetAgents :many
|
||||
SELECT *
|
||||
FROM tailnet_agents
|
||||
WHERE id = $1;
|
||||
|
||||
-- name: GetAllTailnetAgents :many
|
||||
SELECT *
|
||||
FROM tailnet_agents;
|
||||
|
||||
-- name: GetTailnetClientsForAgent :many
|
||||
SELECT *
|
||||
FROM tailnet_clients
|
||||
WHERE id IN (
|
||||
SELECT tailnet_client_subscriptions.client_id
|
||||
FROM tailnet_client_subscriptions
|
||||
WHERE tailnet_client_subscriptions.agent_id = $1
|
||||
);
|
||||
|
||||
-- name: UpsertTailnetCoordinator :one
|
||||
INSERT INTO
|
||||
tailnet_coordinators (
|
||||
|
||||
@@ -75,3 +75,22 @@ WHERE
|
||||
id = @id::uuid
|
||||
AND deleted_at IS NULL
|
||||
RETURNING *;
|
||||
|
||||
-- name: UpsertTaskSnapshot :exec
|
||||
INSERT INTO
|
||||
task_snapshots (task_id, log_snapshot, log_snapshot_created_at)
|
||||
VALUES
|
||||
($1, $2, $3)
|
||||
ON CONFLICT
|
||||
(task_id)
|
||||
DO UPDATE SET
|
||||
log_snapshot = EXCLUDED.log_snapshot,
|
||||
log_snapshot_created_at = EXCLUDED.log_snapshot_created_at;
|
||||
|
||||
-- name: GetTaskSnapshot :one
|
||||
SELECT
|
||||
*
|
||||
FROM
|
||||
task_snapshots
|
||||
WHERE
|
||||
task_id = $1;
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
-- name: InsertWorkspaceAgentDevcontainers :many
|
||||
INSERT INTO
|
||||
workspace_agent_devcontainers (workspace_agent_id, created_at, id, name, workspace_folder, config_path)
|
||||
workspace_agent_devcontainers (workspace_agent_id, created_at, id, name, workspace_folder, config_path, subagent_id)
|
||||
SELECT
|
||||
@workspace_agent_id::uuid AS workspace_agent_id,
|
||||
@created_at::timestamptz AS created_at,
|
||||
unnest(@id::uuid[]) AS id,
|
||||
unnest(@name::text[]) AS name,
|
||||
unnest(@workspace_folder::text[]) AS workspace_folder,
|
||||
unnest(@config_path::text[]) AS config_path
|
||||
unnest(@config_path::text[]) AS config_path,
|
||||
NULLIF(unnest(@subagent_id::uuid[]), '00000000-0000-0000-0000-000000000000')::uuid AS subagent_id
|
||||
RETURNING workspace_agent_devcontainers.*;
|
||||
|
||||
-- name: GetWorkspaceAgentDevcontainersByAgentID :many
|
||||
|
||||
@@ -142,6 +142,27 @@ WHERE
|
||||
wam.workspace_agent_id = $1
|
||||
AND wam.key = m.key;
|
||||
|
||||
-- name: BatchUpdateWorkspaceAgentMetadata :exec
|
||||
WITH metadata AS (
|
||||
SELECT
|
||||
unnest(sqlc.arg('workspace_agent_id')::uuid[]) AS workspace_agent_id,
|
||||
unnest(sqlc.arg('key')::text[]) AS key,
|
||||
unnest(sqlc.arg('value')::text[]) AS value,
|
||||
unnest(sqlc.arg('error')::text[]) AS error,
|
||||
unnest(sqlc.arg('collected_at')::timestamptz[]) AS collected_at
|
||||
)
|
||||
UPDATE
|
||||
workspace_agent_metadata wam
|
||||
SET
|
||||
value = m.value,
|
||||
error = m.error,
|
||||
collected_at = m.collected_at
|
||||
FROM
|
||||
metadata m
|
||||
WHERE
|
||||
wam.workspace_agent_id = m.workspace_agent_id
|
||||
AND wam.key = m.key;
|
||||
|
||||
-- name: GetWorkspaceAgentMetadata :many
|
||||
SELECT
|
||||
*
|
||||
|
||||
@@ -13,6 +13,7 @@ const (
|
||||
UniqueAibridgeUserPromptsPkey UniqueConstraint = "aibridge_user_prompts_pkey" // ALTER TABLE ONLY aibridge_user_prompts ADD CONSTRAINT aibridge_user_prompts_pkey PRIMARY KEY (id);
|
||||
UniqueAPIKeysPkey UniqueConstraint = "api_keys_pkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id);
|
||||
UniqueAuditLogsPkey UniqueConstraint = "audit_logs_pkey" // ALTER TABLE ONLY audit_logs ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
|
||||
UniqueBoundaryUsageStatsPkey UniqueConstraint = "boundary_usage_stats_pkey" // ALTER TABLE ONLY boundary_usage_stats ADD CONSTRAINT boundary_usage_stats_pkey PRIMARY KEY (replica_id);
|
||||
UniqueConnectionLogsPkey UniqueConstraint = "connection_logs_pkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_pkey PRIMARY KEY (id);
|
||||
UniqueCryptoKeysPkey UniqueConstraint = "crypto_keys_pkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_pkey PRIMARY KEY (feature, sequence);
|
||||
UniqueCustomRolesUniqueKey UniqueConstraint = "custom_roles_unique_key" // ALTER TABLE ONLY custom_roles ADD CONSTRAINT custom_roles_unique_key UNIQUE (name, organization_id);
|
||||
@@ -53,9 +54,6 @@ const (
|
||||
UniqueProvisionerJobsPkey UniqueConstraint = "provisioner_jobs_pkey" // ALTER TABLE ONLY provisioner_jobs ADD CONSTRAINT provisioner_jobs_pkey PRIMARY KEY (id);
|
||||
UniqueProvisionerKeysPkey UniqueConstraint = "provisioner_keys_pkey" // ALTER TABLE ONLY provisioner_keys ADD CONSTRAINT provisioner_keys_pkey PRIMARY KEY (id);
|
||||
UniqueSiteConfigsKeyKey UniqueConstraint = "site_configs_key_key" // ALTER TABLE ONLY site_configs ADD CONSTRAINT site_configs_key_key UNIQUE (key);
|
||||
UniqueTailnetAgentsPkey UniqueConstraint = "tailnet_agents_pkey" // ALTER TABLE ONLY tailnet_agents ADD CONSTRAINT tailnet_agents_pkey PRIMARY KEY (id, coordinator_id);
|
||||
UniqueTailnetClientSubscriptionsPkey UniqueConstraint = "tailnet_client_subscriptions_pkey" // ALTER TABLE ONLY tailnet_client_subscriptions ADD CONSTRAINT tailnet_client_subscriptions_pkey PRIMARY KEY (client_id, coordinator_id, agent_id);
|
||||
UniqueTailnetClientsPkey UniqueConstraint = "tailnet_clients_pkey" // ALTER TABLE ONLY tailnet_clients ADD CONSTRAINT tailnet_clients_pkey PRIMARY KEY (id, coordinator_id);
|
||||
UniqueTailnetCoordinatorsPkey UniqueConstraint = "tailnet_coordinators_pkey" // ALTER TABLE ONLY tailnet_coordinators ADD CONSTRAINT tailnet_coordinators_pkey PRIMARY KEY (id);
|
||||
UniqueTailnetPeersPkey UniqueConstraint = "tailnet_peers_pkey" // ALTER TABLE ONLY tailnet_peers ADD CONSTRAINT tailnet_peers_pkey PRIMARY KEY (id, coordinator_id);
|
||||
UniqueTailnetTunnelsPkey UniqueConstraint = "tailnet_tunnels_pkey" // ALTER TABLE ONLY tailnet_tunnels ADD CONSTRAINT tailnet_tunnels_pkey PRIMARY KEY (coordinator_id, src_id, dst_id);
|
||||
|
||||
@@ -162,6 +162,12 @@ func (l *Set) Errors() []string {
|
||||
return slices.Clone(l.entitlements.Errors)
|
||||
}
|
||||
|
||||
func (l *Set) Warnings() []string {
|
||||
l.entitlementsMu.RLock()
|
||||
defer l.entitlementsMu.RUnlock()
|
||||
return slices.Clone(l.entitlements.Warnings)
|
||||
}
|
||||
|
||||
func (l *Set) HasLicense() bool {
|
||||
l.entitlementsMu.RLock()
|
||||
defer l.entitlementsMu.RUnlock()
|
||||
|
||||
+71
-64
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/go-playground/validator/v10"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/coderd/httpapi/httpapiconstraints"
|
||||
"github.com/coder/coder/v2/coderd/tracing"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -418,79 +419,85 @@ func ServerSentEventSender(rw http.ResponseWriter, r *http.Request) (
|
||||
// open a workspace in multiple tabs, the entire UI can start to lock up.
|
||||
// WebSockets have no such limitation, no matter what HTTP protocol was used to
|
||||
// establish the connection.
|
||||
func OneWayWebSocketEventSender(rw http.ResponseWriter, r *http.Request) (
|
||||
func OneWayWebSocketEventSender(log slog.Logger) func(rw http.ResponseWriter, r *http.Request) (
|
||||
func(event codersdk.ServerSentEvent) error,
|
||||
<-chan struct{},
|
||||
error,
|
||||
) {
|
||||
ctx, cancel := context.WithCancel(r.Context())
|
||||
r = r.WithContext(ctx)
|
||||
socket, err := websocket.Accept(rw, r, nil)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, nil, xerrors.Errorf("cannot establish connection: %w", err)
|
||||
}
|
||||
go Heartbeat(ctx, socket)
|
||||
|
||||
eventC := make(chan codersdk.ServerSentEvent)
|
||||
socketErrC := make(chan websocket.CloseError, 1)
|
||||
closed := make(chan struct{})
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer close(closed)
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-eventC:
|
||||
writeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
err := wsjson.Write(writeCtx, socket, event)
|
||||
cancel()
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
_ = socket.Close(websocket.StatusInternalError, "Unable to send newest message")
|
||||
case err := <-socketErrC:
|
||||
_ = socket.Close(err.Code, err.Reason)
|
||||
case <-ctx.Done():
|
||||
_ = socket.Close(websocket.StatusNormalClosure, "Connection closed")
|
||||
}
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// We have some tools in the UI code to help enforce one-way WebSocket
|
||||
// connections, but there's still the possibility that the client could send
|
||||
// a message when it's not supposed to. If that happens, the client likely
|
||||
// forgot to use those tools, and communication probably can't be trusted.
|
||||
// Better to just close the socket and force the UI to fix its mess
|
||||
go func() {
|
||||
_, _, err := socket.Read(ctx)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
return func(rw http.ResponseWriter, r *http.Request) (
|
||||
func(event codersdk.ServerSentEvent) error,
|
||||
<-chan struct{},
|
||||
error,
|
||||
) {
|
||||
ctx, cancel := context.WithCancel(r.Context())
|
||||
r = r.WithContext(ctx)
|
||||
socket, err := websocket.Accept(rw, r, nil)
|
||||
if err != nil {
|
||||
socketErrC <- websocket.CloseError{
|
||||
Code: websocket.StatusInternalError,
|
||||
Reason: "Unable to process invalid message from client",
|
||||
cancel()
|
||||
return nil, nil, xerrors.Errorf("cannot establish connection: %w", err)
|
||||
}
|
||||
go HeartbeatClose(ctx, log, cancel, socket)
|
||||
|
||||
eventC := make(chan codersdk.ServerSentEvent)
|
||||
socketErrC := make(chan websocket.CloseError, 1)
|
||||
closed := make(chan struct{})
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer close(closed)
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-eventC:
|
||||
writeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
err := wsjson.Write(writeCtx, socket, event)
|
||||
cancel()
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
_ = socket.Close(websocket.StatusInternalError, "Unable to send newest message")
|
||||
case err := <-socketErrC:
|
||||
_ = socket.Close(err.Code, err.Reason)
|
||||
case <-ctx.Done():
|
||||
_ = socket.Close(websocket.StatusNormalClosure, "Connection closed")
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
socketErrC <- websocket.CloseError{
|
||||
Code: websocket.StatusProtocolError,
|
||||
Reason: "Clients cannot send messages for one-way WebSockets",
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
sendEvent := func(event codersdk.ServerSentEvent) error {
|
||||
select {
|
||||
case eventC <- event:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
// We have some tools in the UI code to help enforce one-way WebSocket
|
||||
// connections, but there's still the possibility that the client could send
|
||||
// a message when it's not supposed to. If that happens, the client likely
|
||||
// forgot to use those tools, and communication probably can't be trusted.
|
||||
// Better to just close the socket and force the UI to fix its mess
|
||||
go func() {
|
||||
_, _, err := socket.Read(ctx)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
socketErrC <- websocket.CloseError{
|
||||
Code: websocket.StatusInternalError,
|
||||
Reason: "Unable to process invalid message from client",
|
||||
}
|
||||
return
|
||||
}
|
||||
socketErrC <- websocket.CloseError{
|
||||
Code: websocket.StatusProtocolError,
|
||||
Reason: "Clients cannot send messages for one-way WebSockets",
|
||||
}
|
||||
}()
|
||||
|
||||
sendEvent := func(event codersdk.ServerSentEvent) error {
|
||||
select {
|
||||
case eventC <- event:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
return sendEvent, closed, nil
|
||||
}
|
||||
|
||||
return sendEvent, closed, nil
|
||||
}
|
||||
|
||||
// WriteOAuth2Error writes an OAuth2-compliant error response per RFC 6749.
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
@@ -262,7 +263,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
req.Proto = p.proto
|
||||
|
||||
writer := newOneWayWriter(t)
|
||||
_, _, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
_, _, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.ErrorContains(t, err, p.proto)
|
||||
}
|
||||
})
|
||||
@@ -273,7 +274,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
req := newBaseRequest(ctx)
|
||||
writer := newOneWayWriter(t)
|
||||
send, _, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
send, _, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
serverPayload := codersdk.ServerSentEvent{
|
||||
@@ -299,7 +300,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitShort))
|
||||
req := newBaseRequest(ctx)
|
||||
writer := newOneWayWriter(t)
|
||||
_, done, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
_, done, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
successC := make(chan bool)
|
||||
@@ -323,7 +324,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
req := newBaseRequest(ctx)
|
||||
writer := newOneWayWriter(t)
|
||||
_, done, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
_, done, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
successC := make(chan bool)
|
||||
@@ -353,7 +354,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitShort))
|
||||
req := newBaseRequest(ctx)
|
||||
writer := newOneWayWriter(t)
|
||||
send, done, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
send, done, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
successC := make(chan bool)
|
||||
@@ -394,7 +395,7 @@ func TestOneWayWebSocketEventSender(t *testing.T) {
|
||||
ctx := testutil.Context(t, timeout)
|
||||
req := newBaseRequest(ctx)
|
||||
writer := newOneWayWriter(t)
|
||||
_, _, err := httpapi.OneWayWebSocketEventSender(writer, req)
|
||||
_, _, err := httpapi.OneWayWebSocketEventSender(slogtest.Make(t, nil))(writer, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
type Result struct {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user