Compare commits
213 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 0b5dffb572 | |||
| ed64061e55 | |||
| 38f35163f5 | |||
| 99e2b33b1e | |||
| 595d5e8c62 | |||
| cd1cca4945 | |||
| 7b558c0a5b | |||
| f258a310f2 | |||
| 84e389aec0 | |||
| 75a764f780 | |||
| 8aa35c9d5c | |||
| d97fd38b35 | |||
| 6c1fe84185 | |||
| 44d7ee977f | |||
| 9bbc20011a | |||
| ecf7344d21 | |||
| fdceba32d7 | |||
| d68e2f477e | |||
| f9c5f50596 | |||
| 308f619ae5 | |||
| 31aa0fd08b | |||
| 179ea7768e | |||
| 97fda34770 | |||
| 758bd7e287 | |||
| 76dee02f99 | |||
| bf1dd581fb | |||
| 760af814d9 | |||
| cf6f9ef018 | |||
| e564e914cd | |||
| 4c4dd5c99d | |||
| 174b8b06f3 | |||
| e2928f35ee | |||
| 4ae56f2fd6 | |||
| f217c9f855 | |||
| 0d56e7066d | |||
| 6f95706f5d | |||
| 355d6eee22 | |||
| a693e2554a | |||
| b412cdd91a | |||
| 2185aea300 | |||
| f6e7976300 | |||
| 3ef31d73c5 | |||
| 929a319f09 | |||
| 197139915f | |||
| 506c0c9e66 | |||
| fbb8d5f6ab | |||
| e8e22306c1 | |||
| c246d4864d | |||
| 44ea0f106f | |||
| b3474da27b | |||
| daa67c40e8 | |||
| 1660111e92 | |||
| efac6273b7 | |||
| ee4a146400 | |||
| 405bb442d9 | |||
| b8c109ff53 | |||
| 4c1d293066 | |||
| c22769c87f | |||
| 6966a55c5a | |||
| d323decce1 | |||
| 6004982361 | |||
| 9725ea2dd8 | |||
| c055af8ddd | |||
| be63cabfad | |||
| 1dbe0d4664 | |||
| 22a67b8ee8 | |||
| 86373ead1a | |||
| d358b087ea | |||
| 3461572d0b | |||
| d0085d2dbe | |||
| 032938279e | |||
| 3e84596fc2 | |||
| 85e3e19673 | |||
| 52febdb0ef | |||
| 7134021388 | |||
| fc9cad154c | |||
| 402cd8edf4 | |||
| 758fd11aeb | |||
| 09a7ab3c60 | |||
| d3f50a07a9 | |||
| 9434940fd6 | |||
| 476cd08fa6 | |||
| 88d019c1de | |||
| c161306ed6 | |||
| 04d4634b7c | |||
| dca7f1ede4 | |||
| 0a1f3660a9 | |||
| 184ae244fd | |||
| 47abc5e190 | |||
| 02353d36d0 | |||
| 750e883540 | |||
| ad313e7298 | |||
| c7036561f4 | |||
| 1080169274 | |||
| ae06584e62 | |||
| 1f23f4e8b2 | |||
| 9dc6c3c6e9 | |||
| 4446f59262 | |||
| fe8b59600c | |||
| 56e056626e | |||
| de73ec8c6a | |||
| 09db46b4fd | |||
| fb9a9cf075 | |||
| 7a1032d6ed | |||
| 44338a2bf3 | |||
| 1a093ebdc2 | |||
| bb5c04dd92 | |||
| 8eff5a2f29 | |||
| 9cf4811ede | |||
| 745cd43b4c | |||
| bfa3c341e6 | |||
| 40ef295cef | |||
| 4e8e581448 | |||
| 5062c5a251 | |||
| 813ee5d403 | |||
| 5c0c1162a9 | |||
| a3c1ddfc3d | |||
| d8053cb7fd | |||
| ac6f9aaff9 | |||
| a24df6ea71 | |||
| db27a5a49a | |||
| d23f78bb33 | |||
| aacea6a8cf | |||
| 0c65031450 | |||
| 0b72adf15b | |||
| 9df29448ff | |||
| e68a6bc89a | |||
| dc80e044fa | |||
| 41d4f81200 | |||
| cca70d85d0 | |||
| 2535920770 | |||
| e4acf33c30 | |||
| 2daa25b47e | |||
| f9b38be2f3 | |||
| 270e52537d | |||
| e409f3d656 | |||
| 3d506178ed | |||
| d67c8e49e6 | |||
| 205c7204ef | |||
| 6125f01e7d | |||
| 5625d4fcf5 | |||
| ec9bdf126e | |||
| 5bab1f33ec | |||
| 89aef9f5d1 | |||
| 40b555238f | |||
| 5af4118e7a | |||
| fab998c6e0 | |||
| 9e8539eae2 | |||
| 44ea2e63b8 | |||
| d0f7bbc3bd | |||
| ceacb1e61e | |||
| 7ca6c77d22 | |||
| 1b5170700a | |||
| 5007fa4d5f | |||
| 58e335594a | |||
| 1800122cb4 | |||
| a2ab7e6519 | |||
| d167a977ef | |||
| 3507ddc3cf | |||
| 1873687492 | |||
| 43176a74a0 | |||
| 8dfe488cdf | |||
| 6035e45cb8 | |||
| a31e476623 | |||
| e5c3d151bb | |||
| 6ccd20d45f | |||
| a5bc0eb37d | |||
| e98ee5e33d | |||
| 45e08aa9f6 | |||
| 456c0bced9 | |||
| 193e4bd73b | |||
| edcee32ab9 | |||
| 2549fc71fa | |||
| c60c373bc9 | |||
| 25a0c807cb | |||
| fabb0b8344 | |||
| b84bb43a07 | |||
| 15885f8b36 | |||
| 6b1adb8b12 | |||
| 110dcbbb54 | |||
| 541f00b903 | |||
| 8aa9e9acc3 | |||
| d9e39ab5b1 | |||
| 683a7c0957 | |||
| a4296cbbc4 | |||
| efd98bd93a | |||
| 62fa0e8caa | |||
| 953a6159a4 | |||
| 11e17b3de9 | |||
| 549bb95bea | |||
| e3f78500e7 | |||
| 2265df51b4 | |||
| 4bcd2b90b4 | |||
| 96695edfed | |||
| 90faf513c9 | |||
| c166457cde | |||
| e3ce3c342a | |||
| dc633e22a3 | |||
| 20785580d1 | |||
| e914576167 | |||
| 22ece10a4a | |||
| 984e363180 | |||
| d5ae72d5e2 | |||
| ac18b2995b | |||
| 849eaccd78 | |||
| af0e171595 | |||
| 29b1aea736 | |||
| fd00958520 | |||
| a4ffafd46d | |||
| 9d887f2aac | |||
| c2d74c8ed7 | |||
| ad1cdb3a1c | |||
| 83f9d0dcd7 |
@@ -0,0 +1,18 @@
|
||||
name: "Setup GNU tools (macOS)"
|
||||
description: |
|
||||
Installs GNU versions of bash, getopt, and make on macOS runners.
|
||||
Required because lib.sh needs bash 4+, GNU getopt, and make 4+.
|
||||
This is a no-op on non-macOS runners.
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup GNU tools (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
shell: bash
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
@@ -1,13 +1,13 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: pr${PR_NUMBER}-tls
|
||||
name: ${DEPLOY_NAME}-tls
|
||||
namespace: pr-deployment-certs
|
||||
spec:
|
||||
secretName: pr${PR_NUMBER}-tls
|
||||
secretName: ${DEPLOY_NAME}-tls
|
||||
issuerRef:
|
||||
name: letsencrypt
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- "${PR_HOSTNAME}"
|
||||
- "*.${PR_HOSTNAME}"
|
||||
- "${DEPLOY_HOSTNAME}"
|
||||
- "*.${DEPLOY_HOSTNAME}"
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: coder-workspace-pr${PR_NUMBER}
|
||||
namespace: pr${PR_NUMBER}
|
||||
name: coder-workspace-${DEPLOY_NAME}
|
||||
namespace: ${DEPLOY_NAME}
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: coder-workspace-pr${PR_NUMBER}
|
||||
namespace: pr${PR_NUMBER}
|
||||
name: coder-workspace-${DEPLOY_NAME}
|
||||
namespace: ${DEPLOY_NAME}
|
||||
rules:
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
@@ -19,13 +19,13 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: coder-workspace-pr${PR_NUMBER}
|
||||
namespace: pr${PR_NUMBER}
|
||||
name: coder-workspace-${DEPLOY_NAME}
|
||||
namespace: ${DEPLOY_NAME}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: coder-workspace-pr${PR_NUMBER}
|
||||
namespace: pr${PR_NUMBER}
|
||||
name: coder-workspace-${DEPLOY_NAME}
|
||||
namespace: ${DEPLOY_NAME}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: coder-workspace-pr${PR_NUMBER}
|
||||
name: coder-workspace-${DEPLOY_NAME}
|
||||
|
||||
@@ -12,9 +12,23 @@ terraform {
|
||||
provider "coder" {
|
||||
}
|
||||
|
||||
variable "use_kubeconfig" {
|
||||
type = bool
|
||||
description = <<-EOF
|
||||
Use host kubeconfig? (true/false)
|
||||
|
||||
Set this to false if the Coder host is itself running as a Pod on the same
|
||||
Kubernetes cluster as you are deploying workspaces to.
|
||||
|
||||
Set this to true if the Coder host is running outside the Kubernetes cluster
|
||||
for workspaces. A valid "~/.kube/config" must be present on the Coder host.
|
||||
EOF
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "namespace" {
|
||||
type = string
|
||||
description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces)"
|
||||
description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces). If the Coder host is itself running as a Pod on the same Kubernetes cluster as you are deploying workspaces to, set this to the same namespace."
|
||||
}
|
||||
|
||||
data "coder_parameter" "cpu" {
|
||||
@@ -82,7 +96,8 @@ data "coder_parameter" "home_disk_size" {
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
config_path = null
|
||||
# Authenticate via ~/.kube/config or a Coder-specific ServiceAccount, depending on admin preferences
|
||||
config_path = var.use_kubeconfig == true ? "~/.kube/config" : null
|
||||
}
|
||||
|
||||
data "coder_workspace" "me" {}
|
||||
@@ -94,10 +109,12 @@ resource "coder_agent" "main" {
|
||||
startup_script = <<-EOT
|
||||
set -e
|
||||
|
||||
# install and start code-server
|
||||
# Install the latest code-server.
|
||||
# Append "--version x.x.x" to install a specific version of code-server.
|
||||
curl -fsSL https://code-server.dev/install.sh | sh -s -- --method=standalone --prefix=/tmp/code-server
|
||||
/tmp/code-server/bin/code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 &
|
||||
|
||||
# Start code-server in the background.
|
||||
/tmp/code-server/bin/code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 &
|
||||
EOT
|
||||
|
||||
# The following metadata blocks are optional. They are used to display
|
||||
@@ -174,13 +191,13 @@ resource "coder_app" "code-server" {
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_persistent_volume_claim" "home" {
|
||||
resource "kubernetes_persistent_volume_claim_v1" "home" {
|
||||
metadata {
|
||||
name = "coder-${lower(data.coder_workspace_owner.me.name)}-${lower(data.coder_workspace.me.name)}-home"
|
||||
name = "coder-${data.coder_workspace.me.id}-home"
|
||||
namespace = var.namespace
|
||||
labels = {
|
||||
"app.kubernetes.io/name" = "coder-pvc"
|
||||
"app.kubernetes.io/instance" = "coder-pvc-${lower(data.coder_workspace_owner.me.name)}-${lower(data.coder_workspace.me.name)}"
|
||||
"app.kubernetes.io/instance" = "coder-pvc-${data.coder_workspace.me.id}"
|
||||
"app.kubernetes.io/part-of" = "coder"
|
||||
//Coder-specific labels.
|
||||
"com.coder.resource" = "true"
|
||||
@@ -204,18 +221,18 @@ resource "kubernetes_persistent_volume_claim" "home" {
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_deployment" "main" {
|
||||
resource "kubernetes_deployment_v1" "main" {
|
||||
count = data.coder_workspace.me.start_count
|
||||
depends_on = [
|
||||
kubernetes_persistent_volume_claim.home
|
||||
kubernetes_persistent_volume_claim_v1.home
|
||||
]
|
||||
wait_for_rollout = false
|
||||
metadata {
|
||||
name = "coder-${lower(data.coder_workspace_owner.me.name)}-${lower(data.coder_workspace.me.name)}"
|
||||
name = "coder-${data.coder_workspace.me.id}"
|
||||
namespace = var.namespace
|
||||
labels = {
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
"app.kubernetes.io/instance" = "coder-workspace-${lower(data.coder_workspace_owner.me.name)}-${lower(data.coder_workspace.me.name)}"
|
||||
"app.kubernetes.io/instance" = "coder-workspace-${data.coder_workspace.me.id}"
|
||||
"app.kubernetes.io/part-of" = "coder"
|
||||
"com.coder.resource" = "true"
|
||||
"com.coder.workspace.id" = data.coder_workspace.me.id
|
||||
@@ -232,7 +249,14 @@ resource "kubernetes_deployment" "main" {
|
||||
replicas = 1
|
||||
selector {
|
||||
match_labels = {
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
"app.kubernetes.io/instance" = "coder-workspace-${data.coder_workspace.me.id}"
|
||||
"app.kubernetes.io/part-of" = "coder"
|
||||
"com.coder.resource" = "true"
|
||||
"com.coder.workspace.id" = data.coder_workspace.me.id
|
||||
"com.coder.workspace.name" = data.coder_workspace.me.name
|
||||
"com.coder.user.id" = data.coder_workspace_owner.me.id
|
||||
"com.coder.user.username" = data.coder_workspace_owner.me.name
|
||||
}
|
||||
}
|
||||
strategy {
|
||||
@@ -242,20 +266,29 @@ resource "kubernetes_deployment" "main" {
|
||||
template {
|
||||
metadata {
|
||||
labels = {
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
"app.kubernetes.io/instance" = "coder-workspace-${data.coder_workspace.me.id}"
|
||||
"app.kubernetes.io/part-of" = "coder"
|
||||
"com.coder.resource" = "true"
|
||||
"com.coder.workspace.id" = data.coder_workspace.me.id
|
||||
"com.coder.workspace.name" = data.coder_workspace.me.name
|
||||
"com.coder.user.id" = data.coder_workspace_owner.me.id
|
||||
"com.coder.user.username" = data.coder_workspace_owner.me.name
|
||||
}
|
||||
}
|
||||
spec {
|
||||
hostname = lower(data.coder_workspace.me.name)
|
||||
|
||||
security_context {
|
||||
run_as_user = 1000
|
||||
fs_group = 1000
|
||||
run_as_user = 1000
|
||||
fs_group = 1000
|
||||
run_as_non_root = true
|
||||
}
|
||||
|
||||
service_account_name = "coder-workspace-${var.namespace}"
|
||||
container {
|
||||
name = "dev"
|
||||
image = "bencdr/devops-tools"
|
||||
image_pull_policy = "Always"
|
||||
image = "codercom/enterprise-base:ubuntu"
|
||||
image_pull_policy = "IfNotPresent"
|
||||
command = ["sh", "-c", coder_agent.main.init_script]
|
||||
security_context {
|
||||
run_as_user = "1000"
|
||||
@@ -284,7 +317,7 @@ resource "kubernetes_deployment" "main" {
|
||||
volume {
|
||||
name = "home"
|
||||
persistent_volume_claim {
|
||||
claim_name = kubernetes_persistent_volume_claim.home.metadata.0.name
|
||||
claim_name = kubernetes_persistent_volume_claim_v1.home.metadata.0.name
|
||||
read_only = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,24 +1,26 @@
|
||||
coder:
|
||||
podAnnotations:
|
||||
deploy-sha: "${GITHUB_SHA}"
|
||||
image:
|
||||
repo: "${REPO}"
|
||||
tag: "pr${PR_NUMBER}"
|
||||
tag: "${DEPLOY_NAME}"
|
||||
pullPolicy: Always
|
||||
service:
|
||||
type: ClusterIP
|
||||
ingress:
|
||||
enable: true
|
||||
className: traefik
|
||||
host: "${PR_HOSTNAME}"
|
||||
wildcardHost: "*.${PR_HOSTNAME}"
|
||||
host: "${DEPLOY_HOSTNAME}"
|
||||
wildcardHost: "*.${DEPLOY_HOSTNAME}"
|
||||
tls:
|
||||
enable: true
|
||||
secretName: "pr${PR_NUMBER}-tls"
|
||||
wildcardSecretName: "pr${PR_NUMBER}-tls"
|
||||
secretName: "${DEPLOY_NAME}-tls"
|
||||
wildcardSecretName: "${DEPLOY_NAME}-tls"
|
||||
env:
|
||||
- name: "CODER_ACCESS_URL"
|
||||
value: "https://${PR_HOSTNAME}"
|
||||
value: "https://${DEPLOY_HOSTNAME}"
|
||||
- name: "CODER_WILDCARD_ACCESS_URL"
|
||||
value: "*.${PR_HOSTNAME}"
|
||||
value: "*.${DEPLOY_HOSTNAME}"
|
||||
- name: "CODER_EXPERIMENTS"
|
||||
value: "${EXPERIMENTS}"
|
||||
- name: CODER_PG_CONNECTION_URL
|
||||
|
||||
@@ -0,0 +1,408 @@
|
||||
name: Deploy Branch
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: deploy-${{ github.ref_name }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
permissions:
|
||||
packages: write
|
||||
env:
|
||||
CODER_IMAGE_TAG: "ghcr.io/coder/coder-preview:${{ github.ref_name }}"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
run: |
|
||||
set -euo pipefail
|
||||
go mod download
|
||||
make gen/mark-fresh
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
version="$(./scripts/version.sh)"
|
||||
CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
export CODER_IMAGE_BUILD_BASE_TAG
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target "${CODER_IMAGE_TAG}" \
|
||||
--version "$version" \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
deploy:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BRANCH_NAME: ${{ github.ref_name }}
|
||||
DEPLOY_NAME: "${{ github.ref_name }}"
|
||||
TEST_DOMAIN_SUFFIX: "${{ startsWith(secrets.PR_DEPLOYMENTS_DOMAIN, 'test.') && secrets.PR_DEPLOYMENTS_DOMAIN || format('test.{0}', secrets.PR_DEPLOYMENTS_DOMAIN) }}"
|
||||
BRANCH_HOSTNAME: "${{ github.ref_name }}.${{ startsWith(secrets.PR_DEPLOYMENTS_DOMAIN, 'test.') && secrets.PR_DEPLOYMENTS_DOMAIN || format('test.{0}', secrets.PR_DEPLOYMENTS_DOMAIN) }}"
|
||||
CODER_IMAGE_TAG: "ghcr.io/coder/coder-preview:${{ github.ref_name }}"
|
||||
REPO: ghcr.io/coder/coder-preview
|
||||
EXPERIMENTS: "*,oauth2,mcp-server-http"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up kubeconfig
|
||||
run: |
|
||||
set -euo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG_BASE64 }}" | base64 --decode > ~/.kube/config
|
||||
chmod 600 ~/.kube/config
|
||||
|
||||
- name: Verify cluster authentication
|
||||
run: |
|
||||
set -euo pipefail
|
||||
kubectl auth can-i get namespaces > /dev/null
|
||||
|
||||
- name: Check if deployment exists
|
||||
id: check
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
set +e
|
||||
helm_status_output="$(helm status "${DEPLOY_NAME}" --namespace "${DEPLOY_NAME}" 2>&1)"
|
||||
helm_status_code=$?
|
||||
set -e
|
||||
|
||||
if [ "$helm_status_code" -eq 0 ]; then
|
||||
echo "new=false" >> "$GITHUB_OUTPUT"
|
||||
elif echo "$helm_status_output" | grep -qi "release: not found"; then
|
||||
echo "new=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "$helm_status_output"
|
||||
exit "$helm_status_code"
|
||||
fi
|
||||
|
||||
# ---- Every push: ensure routing + TLS ----
|
||||
|
||||
- name: Ensure DNS records
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
api_base_url="https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records"
|
||||
base_name="${BRANCH_HOSTNAME}"
|
||||
base_target="${TEST_DOMAIN_SUFFIX}"
|
||||
wildcard_name="*.${BRANCH_HOSTNAME}"
|
||||
|
||||
ensure_cname_record() {
|
||||
local record_name="$1"
|
||||
local record_content="$2"
|
||||
|
||||
echo "Ensuring CNAME ${record_name} -> ${record_content}."
|
||||
|
||||
set +e
|
||||
lookup_raw_response="$(
|
||||
curl -sS -G "${api_base_url}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" \
|
||||
--data-urlencode "name=${record_name}" \
|
||||
--data-urlencode "per_page=100" \
|
||||
-w '\n%{http_code}'
|
||||
)"
|
||||
lookup_exit_code=$?
|
||||
set -e
|
||||
|
||||
if [ "$lookup_exit_code" -eq 0 ]; then
|
||||
lookup_response="${lookup_raw_response%$'\n'*}"
|
||||
lookup_http_code="${lookup_raw_response##*$'\n'}"
|
||||
|
||||
if [ "$lookup_http_code" = "200" ] && echo "$lookup_response" | jq -e '.success == true' > /dev/null 2>&1; then
|
||||
if echo "$lookup_response" | jq -e '.result[]? | select(.type != "CNAME")' > /dev/null 2>&1; then
|
||||
echo "Conflicting non-CNAME DNS record exists for ${record_name}."
|
||||
echo "$lookup_response"
|
||||
return 1
|
||||
fi
|
||||
|
||||
existing_cname_id="$(echo "$lookup_response" | jq -r '.result[]? | select(.type == "CNAME") | .id' | head -n1)"
|
||||
if [ -n "$existing_cname_id" ]; then
|
||||
existing_content="$(echo "$lookup_response" | jq -r --arg id "$existing_cname_id" '.result[] | select(.id == $id) | .content')"
|
||||
if [ "$existing_content" = "$record_content" ]; then
|
||||
echo "CNAME already set for ${record_name}."
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "Updating existing CNAME for ${record_name}."
|
||||
update_response="$(
|
||||
curl -sS -X PUT "${api_base_url}/${existing_cname_id}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" \
|
||||
--data '{"type":"CNAME","name":"'"${record_name}"'","content":"'"${record_content}"'","ttl":1,"proxied":false}'
|
||||
)"
|
||||
|
||||
if echo "$update_response" | jq -e '.success == true' > /dev/null 2>&1; then
|
||||
echo "Updated CNAME for ${record_name}."
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "Cloudflare API error while updating ${record_name}:"
|
||||
echo "$update_response"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "Could not query DNS record ${record_name}; attempting create."
|
||||
fi
|
||||
|
||||
max_attempts=6
|
||||
attempt=1
|
||||
last_response=""
|
||||
last_http_code=""
|
||||
|
||||
while [ "$attempt" -le "$max_attempts" ]; do
|
||||
echo "Creating DNS record ${record_name} (attempt ${attempt}/${max_attempts})."
|
||||
|
||||
set +e
|
||||
raw_response="$(
|
||||
curl -sS -X POST "${api_base_url}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" \
|
||||
--data '{"type":"CNAME","name":"'"${record_name}"'","content":"'"${record_content}"'","ttl":1,"proxied":false}' \
|
||||
-w '\n%{http_code}'
|
||||
)"
|
||||
curl_exit_code=$?
|
||||
set -e
|
||||
|
||||
curl_failed=false
|
||||
if [ "$curl_exit_code" -eq 0 ]; then
|
||||
response="${raw_response%$'\n'*}"
|
||||
http_code="${raw_response##*$'\n'}"
|
||||
else
|
||||
response="curl exited with code ${curl_exit_code}."
|
||||
http_code="000"
|
||||
curl_failed=true
|
||||
fi
|
||||
|
||||
last_response="$response"
|
||||
last_http_code="$http_code"
|
||||
|
||||
if echo "$response" | jq -e '.success == true' > /dev/null 2>&1; then
|
||||
echo "Created DNS record ${record_name}."
|
||||
return 0
|
||||
fi
|
||||
|
||||
# 81057: identical record exists. 81053: host record conflict.
|
||||
if echo "$response" | jq -e '.errors[]? | select(.code == 81057 or .code == 81053)' > /dev/null 2>&1; then
|
||||
echo "DNS record already exists for ${record_name}."
|
||||
return 0
|
||||
fi
|
||||
|
||||
transient_error=false
|
||||
if [ "$curl_failed" = true ] || [ "$http_code" = "429" ]; then
|
||||
transient_error=true
|
||||
elif [[ "$http_code" =~ ^[0-9]{3}$ ]] && [ "$http_code" -ge 500 ] && [ "$http_code" -lt 600 ]; then
|
||||
transient_error=true
|
||||
fi
|
||||
|
||||
if echo "$response" | jq -e '.errors[]? | select(.code == 10000 or .code == 10001)' > /dev/null 2>&1; then
|
||||
transient_error=true
|
||||
fi
|
||||
|
||||
if [ "$transient_error" = true ] && [ "$attempt" -lt "$max_attempts" ]; then
|
||||
sleep_seconds=$((attempt * 5))
|
||||
echo "Transient Cloudflare API error (HTTP ${http_code}). Retrying in ${sleep_seconds}s."
|
||||
sleep "$sleep_seconds"
|
||||
attempt=$((attempt + 1))
|
||||
continue
|
||||
fi
|
||||
|
||||
break
|
||||
done
|
||||
|
||||
echo "Cloudflare API error while creating DNS record ${record_name} after ${attempt} attempt(s):"
|
||||
echo "HTTP status: ${last_http_code}"
|
||||
echo "$last_response"
|
||||
return 1
|
||||
}
|
||||
|
||||
ensure_cname_record "${base_name}" "${base_target}"
|
||||
ensure_cname_record "${wildcard_name}" "${base_name}"
|
||||
|
||||
# ---- First deploy only ----
|
||||
|
||||
- name: Create namespace
|
||||
if: steps.check.outputs.new == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
kubectl delete namespace "${DEPLOY_NAME}" --wait=true || true
|
||||
# Delete any orphaned PVs that were bound to PVCs in this
|
||||
# namespace. Without this, the old PV (with stale Postgres
|
||||
# data) gets reused on reinstall, causing auth failures.
|
||||
kubectl get pv -o json | \
|
||||
jq -r '.items[] | select(.spec.claimRef.namespace=='"${DEPLOY_NAME}"') | .metadata.name' | \
|
||||
xargs -r kubectl delete pv || true
|
||||
kubectl create namespace "${DEPLOY_NAME}"
|
||||
|
||||
# ---- Every push: ensure deployment certificate ----
|
||||
|
||||
- name: Ensure certificate
|
||||
env:
|
||||
DEPLOY_HOSTNAME: ${{ env.BRANCH_HOSTNAME }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cert_secret_name="${DEPLOY_NAME}-tls"
|
||||
|
||||
envsubst < ./.github/pr-deployments/certificate.yaml | kubectl apply -f -
|
||||
|
||||
if ! kubectl -n pr-deployment-certs wait --for=condition=Ready "certificate/${cert_secret_name}" --timeout=10m; then
|
||||
echo "Timed out waiting for certificate ${cert_secret_name} to become Ready after 10 minutes."
|
||||
kubectl -n pr-deployment-certs describe certificate "${cert_secret_name}" || true
|
||||
kubectl -n pr-deployment-certs get certificaterequest,order,challenge -l "cert-manager.io/certificate-name=${cert_secret_name}" || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kubectl get secret "${cert_secret_name}" -n pr-deployment-certs -o json |
|
||||
jq 'del(.metadata.namespace,.metadata.creationTimestamp,.metadata.resourceVersion,.metadata.selfLink,.metadata.uid,.metadata.managedFields)' |
|
||||
kubectl -n "${DEPLOY_NAME}" apply -f -
|
||||
|
||||
- name: Set up PostgreSQL
|
||||
if: steps.check.outputs.new == 'true'
|
||||
run: |
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
helm install coder-db bitnami/postgresql \
|
||||
--namespace "${DEPLOY_NAME}" \
|
||||
--set image.repository=bitnamilegacy/postgresql \
|
||||
--set auth.username=coder \
|
||||
--set auth.password=coder \
|
||||
--set auth.database=coder \
|
||||
--set persistence.size=10Gi
|
||||
kubectl create secret generic coder-db-url -n "${DEPLOY_NAME}" \
|
||||
--from-literal=url="postgres://coder:coder@coder-db-postgresql.${DEPLOY_NAME}.svc.cluster.local:5432/coder?sslmode=disable"
|
||||
|
||||
- name: Create RBAC
|
||||
if: steps.check.outputs.new == 'true'
|
||||
run: envsubst < ./.github/pr-deployments/rbac.yaml | kubectl apply -f -
|
||||
|
||||
# ---- Every push ----
|
||||
|
||||
- name: Create values.yaml
|
||||
env:
|
||||
DEPLOY_HOSTNAME: ${{ env.BRANCH_HOSTNAME }}
|
||||
REPO: ${{ env.REPO }}
|
||||
PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_ID: ${{ secrets.PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_ID }}
|
||||
PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_SECRET: ${{ secrets.PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_SECRET }}
|
||||
run: envsubst < ./.github/pr-deployments/values.yaml > ./deploy-values.yaml
|
||||
|
||||
- name: Install/Upgrade Helm chart
|
||||
run: |
|
||||
set -euo pipefail
|
||||
helm dependency update --skip-refresh ./helm/coder
|
||||
helm upgrade --install "${DEPLOY_NAME}" ./helm/coder \
|
||||
--namespace "${DEPLOY_NAME}" \
|
||||
--values ./deploy-values.yaml \
|
||||
--force
|
||||
|
||||
- name: Install coder-logstream-kube
|
||||
if: steps.check.outputs.new == 'true'
|
||||
run: |
|
||||
helm repo add coder-logstream-kube https://helm.coder.com/logstream-kube
|
||||
helm upgrade --install coder-logstream-kube coder-logstream-kube/coder-logstream-kube \
|
||||
--namespace "${DEPLOY_NAME}" \
|
||||
--set url="https://${BRANCH_HOSTNAME}" \
|
||||
--set "namespaces[0]=${DEPLOY_NAME}"
|
||||
|
||||
- name: Create first user and template
|
||||
if: steps.check.outputs.new == 'true'
|
||||
env:
|
||||
PR_DEPLOYMENTS_ADMIN_PASSWORD: ${{ secrets.PR_DEPLOYMENTS_ADMIN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
URL="https://${BRANCH_HOSTNAME}/bin/coder-linux-amd64"
|
||||
COUNT=0
|
||||
until curl --output /dev/null --silent --head --fail "$URL"; do
|
||||
sleep 5
|
||||
COUNT=$((COUNT+1))
|
||||
if [ "$COUNT" -ge 60 ]; then echo "Timed out"; exit 1; fi
|
||||
done
|
||||
curl -fsSL "$URL" -o /tmp/coder && chmod +x /tmp/coder
|
||||
|
||||
password="${PR_DEPLOYMENTS_ADMIN_PASSWORD}"
|
||||
if [ -z "$password" ]; then
|
||||
echo "Missing PR_DEPLOYMENTS_ADMIN_PASSWORD repository secret."
|
||||
exit 1
|
||||
fi
|
||||
echo "::add-mask::$password"
|
||||
|
||||
admin_username="${BRANCH_NAME}-admin"
|
||||
admin_email="${BRANCH_NAME}@coder.com"
|
||||
coder_url="https://${BRANCH_HOSTNAME}"
|
||||
|
||||
first_user_status="$(curl -sS -o /dev/null -w '%{http_code}' "${coder_url}/api/v2/users/first")"
|
||||
if [ "$first_user_status" = "404" ]; then
|
||||
/tmp/coder login \
|
||||
--first-user-username "$admin_username" \
|
||||
--first-user-email "$admin_email" \
|
||||
--first-user-password "$password" \
|
||||
--first-user-trial=false \
|
||||
--use-token-as-session \
|
||||
"$coder_url"
|
||||
elif [ "$first_user_status" = "200" ]; then
|
||||
login_payload="$(jq -n --arg email "$admin_email" --arg password "$password" '{email: $email, password: $password}')"
|
||||
login_response="$(
|
||||
curl -sS -X POST "${coder_url}/api/v2/users/login" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data "$login_payload" \
|
||||
-w '\n%{http_code}'
|
||||
)"
|
||||
login_body="${login_response%$'\n'*}"
|
||||
login_status="${login_response##*$'\n'}"
|
||||
|
||||
if [ "$login_status" != "201" ]; then
|
||||
echo "Password login failed for existing deployment (HTTP ${login_status})."
|
||||
echo "$login_body"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
session_token="$(echo "$login_body" | jq -r '.session_token // empty')"
|
||||
if [ -z "$session_token" ]; then
|
||||
echo "Password login response is missing session_token."
|
||||
exit 1
|
||||
fi
|
||||
echo "::add-mask::$session_token"
|
||||
|
||||
/tmp/coder login \
|
||||
--token "$session_token" \
|
||||
--use-token-as-session \
|
||||
"$coder_url"
|
||||
else
|
||||
echo "Unexpected status from /api/v2/users/first: ${first_user_status}."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd .github/pr-deployments/template
|
||||
/tmp/coder templates push -y --directory . --variable "namespace=${DEPLOY_NAME}" kubernetes
|
||||
/tmp/coder create --template="kubernetes" kube \
|
||||
--parameter cpu=2 --parameter memory=4 --parameter home_disk_size=2 -y
|
||||
/tmp/coder stop kube -y
|
||||
@@ -414,17 +414,8 @@ jobs:
|
||||
id: go-paths
|
||||
uses: ./.github/actions/setup-go-paths
|
||||
|
||||
# macOS default bash and coreutils are too old for our scripts
|
||||
# (lib.sh requires bash 4+, GNU getopt, make 4+).
|
||||
- name: Setup GNU tools (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
uses: ./.github/actions/setup-gnu-tools
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -1056,14 +1047,8 @@ jobs:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup build tools
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
- name: Setup GNU tools (macOS)
|
||||
uses: ./.github/actions/setup-gnu-tools
|
||||
|
||||
- name: Switch XCode Version
|
||||
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
|
||||
|
||||
@@ -6,9 +6,7 @@
|
||||
# native suggestion syntax, allowing one-click commits of suggested changes.
|
||||
#
|
||||
# Triggers:
|
||||
# - New PR opened: Initial code review
|
||||
# - Label "code-review" added: Re-run review on demand
|
||||
# - PR marked ready for review: Review when draft is promoted
|
||||
# - Label "code-review" added: Run review on demand
|
||||
# - Workflow dispatch: Manual run with PR URL
|
||||
#
|
||||
# Note: This workflow requires access to secrets and will be skipped for:
|
||||
@@ -20,9 +18,7 @@ name: AI Code Review
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- labeled
|
||||
- ready_for_review
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_url:
|
||||
@@ -44,9 +40,7 @@ jobs:
|
||||
cancel-in-progress: true
|
||||
if: |
|
||||
(
|
||||
github.event.action == 'opened' ||
|
||||
github.event.label.name == 'code-review' ||
|
||||
github.event.action == 'ready_for_review' ||
|
||||
github.event_name == 'workflow_dispatch'
|
||||
) &&
|
||||
(github.event.pull_request.draft == false || github.event_name == 'workflow_dispatch')
|
||||
@@ -127,15 +121,9 @@ jobs:
|
||||
|
||||
# Set trigger type based on action
|
||||
case "${GITHUB_EVENT_ACTION}" in
|
||||
opened)
|
||||
echo "trigger_type=new_pr" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
labeled)
|
||||
echo "trigger_type=label_requested" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
ready_for_review)
|
||||
echo "trigger_type=ready_for_review" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
*)
|
||||
echo "trigger_type=unknown" >> "${GITHUB_OUTPUT}"
|
||||
;;
|
||||
@@ -157,15 +145,9 @@ jobs:
|
||||
|
||||
# Build context based on trigger type
|
||||
case "${TRIGGER_TYPE}" in
|
||||
new_pr)
|
||||
CONTEXT="This is a NEW PR. Perform a thorough code review."
|
||||
;;
|
||||
label_requested)
|
||||
CONTEXT="A code review was REQUESTED via label. Perform a thorough code review."
|
||||
;;
|
||||
ready_for_review)
|
||||
CONTEXT="This PR was marked READY FOR REVIEW. Perform a thorough code review."
|
||||
;;
|
||||
manual)
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough code review."
|
||||
;;
|
||||
|
||||
@@ -160,34 +160,41 @@ jobs:
|
||||
# Build context based on trigger type
|
||||
case "${TRIGGER_TYPE}" in
|
||||
new_pr)
|
||||
CONTEXT="This is a NEW PR. Perform a thorough documentation review."
|
||||
CONTEXT="This is a NEW PR. Perform initial documentation review."
|
||||
;;
|
||||
pr_updated)
|
||||
CONTEXT="This PR was UPDATED with new commits. Only comment if the changes affect documentation needs or address previous feedback."
|
||||
CONTEXT="This PR was UPDATED with new commits. Check if previous feedback was addressed or if new doc needs arose."
|
||||
;;
|
||||
label_requested)
|
||||
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough documentation review."
|
||||
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough review."
|
||||
;;
|
||||
ready_for_review)
|
||||
CONTEXT="This PR was marked READY FOR REVIEW (converted from draft). Perform a thorough documentation review."
|
||||
CONTEXT="This PR was marked READY FOR REVIEW. Perform a thorough review."
|
||||
;;
|
||||
manual)
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough documentation review."
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough review."
|
||||
;;
|
||||
*)
|
||||
CONTEXT="Perform a thorough documentation review."
|
||||
CONTEXT="Perform a documentation review."
|
||||
;;
|
||||
esac
|
||||
|
||||
# Build task prompt with PR-specific context
|
||||
# Build task prompt with sticky comment logic
|
||||
TASK_PROMPT="Use the doc-check skill to review PR #${PR_NUMBER} in coder/coder.
|
||||
|
||||
${CONTEXT}
|
||||
|
||||
Use \`gh\` to get PR details, diff, and all comments. Check for previous doc-check comments (from coder-doc-check) and only post a new comment if it adds value.
|
||||
Use \`gh\` to get PR details, diff, and all comments. Look for an existing doc-check comment containing \`<!-- doc-check-sticky -->\` - if one exists, you'll update it instead of creating a new one.
|
||||
|
||||
**Do not comment if no documentation changes are needed.**
|
||||
|
||||
If a sticky comment already exists, compare your current findings against it:
|
||||
- Check off \`[x]\` items that are now addressed
|
||||
- Strikethrough items no longer needed (e.g., code was reverted)
|
||||
- Add new unchecked \`[ ]\` items for newly discovered needs
|
||||
- If an item is checked but you can't verify the docs were added, add a warning note below it
|
||||
- If nothing meaningful changed, don't update the comment at all
|
||||
|
||||
## Comment format
|
||||
|
||||
Use this structure (only include relevant sections):
|
||||
@@ -195,18 +202,21 @@ jobs:
|
||||
\`\`\`
|
||||
## Documentation Check
|
||||
|
||||
### Previous Feedback
|
||||
[For re-reviews only: Addressed | Partially addressed | Not yet addressed]
|
||||
|
||||
### Updates Needed
|
||||
- [ ] \`docs/path/file.md\` - [what needs to change]
|
||||
- [ ] \`docs/path/file.md\` - What needs to change
|
||||
- [x] \`docs/other/file.md\` - This was addressed
|
||||
- ~~\`docs/removed.md\` - No longer needed~~ *(reverted in abc123)*
|
||||
|
||||
### New Documentation Needed
|
||||
- [ ] \`docs/suggested/path.md\` - [what should be documented]
|
||||
- [ ] \`docs/suggested/path.md\` - What should be documented
|
||||
> ⚠️ *Checked but no corresponding documentation changes found in this PR*
|
||||
|
||||
---
|
||||
*Automated review via [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
|
||||
\`\`\`"
|
||||
<!-- doc-check-sticky -->
|
||||
\`\`\`
|
||||
|
||||
The \`<!-- doc-check-sticky -->\` marker must be at the end so future runs can find and update this comment."
|
||||
|
||||
# Output the prompt
|
||||
{
|
||||
|
||||
@@ -59,6 +59,9 @@ jobs:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup GNU tools (macOS)
|
||||
uses: ./.github/actions/setup-gnu-tools
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
|
||||
@@ -285,6 +285,8 @@ jobs:
|
||||
PR_NUMBER: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
PR_TITLE: ${{ needs.get_info.outputs.PR_TITLE }}
|
||||
PR_URL: ${{ needs.get_info.outputs.PR_URL }}
|
||||
DEPLOY_NAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}"
|
||||
DEPLOY_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
@@ -521,7 +523,7 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cd .github/pr-deployments/template
|
||||
coder templates push -y --variable "namespace=pr${PR_NUMBER}" kubernetes
|
||||
coder templates push -y --directory . --variable "namespace=pr${PR_NUMBER}" kubernetes
|
||||
|
||||
# Create workspace
|
||||
coder create --template="kubernetes" kube --parameter cpu=2 --parameter memory=4 --parameter home_disk_size=2 -y
|
||||
|
||||
@@ -78,14 +78,8 @@ jobs:
|
||||
- name: Fetch git tags
|
||||
run: git fetch --tags --force
|
||||
|
||||
- name: Setup build tools
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
- name: Setup GNU tools (macOS)
|
||||
uses: ./.github/actions/setup-gnu-tools
|
||||
|
||||
- name: Switch XCode Version
|
||||
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
|
||||
|
||||
@@ -938,6 +938,7 @@ coderd/apidoc/.gen: \
|
||||
coderd/rbac/object_gen.go \
|
||||
.swaggo \
|
||||
scripts/apidocgen/generate.sh \
|
||||
scripts/apidocgen/swaginit/main.go \
|
||||
$(wildcard scripts/apidocgen/postprocess/*) \
|
||||
$(wildcard scripts/apidocgen/markdown-template/*)
|
||||
./scripts/apidocgen/generate.sh
|
||||
|
||||
+114
-44
@@ -12,6 +12,7 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
@@ -39,7 +40,6 @@ import (
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/clistat"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentfiles"
|
||||
"github.com/coder/coder/v2/agent/agentscripts"
|
||||
@@ -554,7 +554,7 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient28
|
||||
// Set up collect and report as a single ticker with two channels,
|
||||
// this is to allow collection and reporting to be triggered
|
||||
// independently of each other.
|
||||
agentutil.Go(ctx, a.logger, func() {
|
||||
go func() {
|
||||
t := time.NewTicker(a.reportMetadataInterval)
|
||||
defer func() {
|
||||
t.Stop()
|
||||
@@ -579,9 +579,9 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient28
|
||||
wake(collect)
|
||||
}
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
agentutil.Go(ctx, a.logger, func() {
|
||||
go func() {
|
||||
defer close(collectDone)
|
||||
|
||||
var (
|
||||
@@ -628,7 +628,7 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient28
|
||||
// We send the result to the channel in the goroutine to avoid
|
||||
// sending the same result multiple times. So, we don't care about
|
||||
// the return values.
|
||||
agentutil.Go(ctx, a.logger, func() { flight.Do(md.Key, func() {
|
||||
go flight.Do(md.Key, func() {
|
||||
ctx := slog.With(ctx, slog.F("key", md.Key))
|
||||
lastCollectedAtMu.RLock()
|
||||
collectedAt, ok := lastCollectedAts[md.Key]
|
||||
@@ -681,10 +681,10 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient28
|
||||
lastCollectedAts[md.Key] = now
|
||||
lastCollectedAtMu.Unlock()
|
||||
}
|
||||
}) })
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
// Gather metadata updates and report them once every interval. If a
|
||||
// previous report is in flight, wait for it to complete before
|
||||
@@ -735,14 +735,14 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient28
|
||||
}
|
||||
|
||||
reportInFlight = true
|
||||
agentutil.Go(ctx, a.logger, func() {
|
||||
go func() {
|
||||
a.logger.Debug(ctx, "batch updating metadata")
|
||||
ctx, cancel := context.WithTimeout(ctx, reportTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := aAPI.BatchUpdateMetadata(ctx, &proto.BatchUpdateMetadataRequest{Metadata: metadata})
|
||||
reportError <- err
|
||||
})
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -882,7 +882,7 @@ const (
|
||||
reportConnectionBufferLimit = 2048
|
||||
)
|
||||
|
||||
func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_Type, ip string) (disconnected func(code int, reason string)) {
|
||||
func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_Type, ip string, options ...func(*proto.Connection)) (disconnected func(code int, reason string)) {
|
||||
// A blank IP can unfortunately happen if the connection is broken in a data race before we get to introspect it. We
|
||||
// still report it, and the recipient can handle a blank IP.
|
||||
if ip != "" {
|
||||
@@ -913,16 +913,20 @@ func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_T
|
||||
slog.F("ip", ip),
|
||||
)
|
||||
} else {
|
||||
connectMsg := &proto.Connection{
|
||||
Id: id[:],
|
||||
Action: proto.Connection_CONNECT,
|
||||
Type: connectionType,
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Ip: ip,
|
||||
StatusCode: 0,
|
||||
Reason: nil,
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt(connectMsg)
|
||||
}
|
||||
a.reportConnections = append(a.reportConnections, &proto.ReportConnectionRequest{
|
||||
Connection: &proto.Connection{
|
||||
Id: id[:],
|
||||
Action: proto.Connection_CONNECT,
|
||||
Type: connectionType,
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Ip: ip,
|
||||
StatusCode: 0,
|
||||
Reason: nil,
|
||||
},
|
||||
Connection: connectMsg,
|
||||
})
|
||||
select {
|
||||
case a.reportConnectionsUpdate <- struct{}{}:
|
||||
@@ -943,16 +947,20 @@ func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_T
|
||||
return
|
||||
}
|
||||
|
||||
disconnMsg := &proto.Connection{
|
||||
Id: id[:],
|
||||
Action: proto.Connection_DISCONNECT,
|
||||
Type: connectionType,
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Ip: ip,
|
||||
StatusCode: int32(code), //nolint:gosec
|
||||
Reason: &reason,
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt(disconnMsg)
|
||||
}
|
||||
a.reportConnections = append(a.reportConnections, &proto.ReportConnectionRequest{
|
||||
Connection: &proto.Connection{
|
||||
Id: id[:],
|
||||
Action: proto.Connection_DISCONNECT,
|
||||
Type: connectionType,
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Ip: ip,
|
||||
StatusCode: int32(code), //nolint:gosec
|
||||
Reason: &reason,
|
||||
},
|
||||
Connection: disconnMsg,
|
||||
})
|
||||
select {
|
||||
case a.reportConnectionsUpdate <- struct{}{}:
|
||||
@@ -1378,6 +1386,8 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
|
||||
manifest.DERPForceWebSockets,
|
||||
manifest.DisableDirectConnections,
|
||||
keySeed,
|
||||
manifest.WorkspaceName,
|
||||
manifest.Apps,
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tailnet: %w", err)
|
||||
@@ -1519,19 +1529,46 @@ func (a *agent) trackGoroutine(fn func()) error {
|
||||
return xerrors.Errorf("track conn goroutine: %w", ErrAgentClosing)
|
||||
}
|
||||
a.closeWaitGroup.Add(1)
|
||||
agentutil.Go(a.hardCtx, a.logger, func() {
|
||||
go func() {
|
||||
defer a.closeWaitGroup.Done()
|
||||
fn()
|
||||
})
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// appPortFromURL extracts the port from a workspace app URL,
|
||||
// defaulting to 80/443 by scheme.
|
||||
func appPortFromURL(rawURL string) uint16 {
|
||||
u, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
p := u.Port()
|
||||
if p == "" {
|
||||
switch u.Scheme {
|
||||
case "http":
|
||||
return 80
|
||||
case "https":
|
||||
return 443
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
port, err := strconv.ParseUint(p, 10, 16)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return uint16(port)
|
||||
}
|
||||
|
||||
func (a *agent) createTailnet(
|
||||
ctx context.Context,
|
||||
agentID uuid.UUID,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
derpForceWebSockets, disableDirectConnections bool,
|
||||
keySeed int64,
|
||||
workspaceName string,
|
||||
apps []codersdk.WorkspaceApp,
|
||||
) (_ *tailnet.Conn, err error) {
|
||||
// Inject `CODER_AGENT_HEADER` into the DERP header.
|
||||
var header http.Header
|
||||
@@ -1540,6 +1577,18 @@ func (a *agent) createTailnet(
|
||||
header = headerTransport.Header
|
||||
}
|
||||
}
|
||||
|
||||
// Build port-to-app mapping for workspace app connection tracking
|
||||
// via the tailnet callback.
|
||||
portToApp := make(map[uint16]codersdk.WorkspaceApp)
|
||||
for _, app := range apps {
|
||||
port := appPortFromURL(app.URL)
|
||||
if port == 0 || app.External {
|
||||
continue
|
||||
}
|
||||
portToApp[port] = app
|
||||
}
|
||||
|
||||
network, err := tailnet.NewConn(&tailnet.Options{
|
||||
ID: agentID,
|
||||
Addresses: a.wireguardAddresses(agentID),
|
||||
@@ -1549,6 +1598,27 @@ func (a *agent) createTailnet(
|
||||
Logger: a.logger.Named("net.tailnet"),
|
||||
ListenPort: a.tailnetListenPort,
|
||||
BlockEndpoints: disableDirectConnections,
|
||||
ShortDescription: "Workspace Agent",
|
||||
Hostname: workspaceName,
|
||||
TCPConnCallback: func(src, dst netip.AddrPort) (disconnected func(int, string)) {
|
||||
app, ok := portToApp[dst.Port()]
|
||||
connType := proto.Connection_PORT_FORWARDING
|
||||
slugOrPort := strconv.Itoa(int(dst.Port()))
|
||||
if ok {
|
||||
connType = proto.Connection_WORKSPACE_APP
|
||||
if app.Slug != "" {
|
||||
slugOrPort = app.Slug
|
||||
}
|
||||
}
|
||||
return a.reportConnection(
|
||||
uuid.New(),
|
||||
connType,
|
||||
src.String(),
|
||||
func(c *proto.Connection) {
|
||||
c.SlugOrPort = &slugOrPort
|
||||
},
|
||||
)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("create tailnet: %w", err)
|
||||
@@ -1626,15 +1696,15 @@ func (a *agent) createTailnet(
|
||||
clog.Info(ctx, "accepted conn")
|
||||
wg.Add(1)
|
||||
closed := make(chan struct{})
|
||||
agentutil.Go(ctx, clog, func() {
|
||||
go func() {
|
||||
select {
|
||||
case <-closed:
|
||||
case <-a.hardCtx.Done():
|
||||
_ = conn.Close()
|
||||
}
|
||||
wg.Done()
|
||||
})
|
||||
agentutil.Go(ctx, clog, func() {
|
||||
}()
|
||||
go func() {
|
||||
defer close(closed)
|
||||
sErr := speedtest.ServeConn(conn)
|
||||
if sErr != nil {
|
||||
@@ -1642,7 +1712,7 @@ func (a *agent) createTailnet(
|
||||
return
|
||||
}
|
||||
clog.Info(ctx, "test ended")
|
||||
})
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}); err != nil {
|
||||
@@ -1669,13 +1739,13 @@ func (a *agent) createTailnet(
|
||||
WriteTimeout: 20 * time.Second,
|
||||
ErrorLog: slog.Stdlib(ctx, a.logger.Named("http_api_server"), slog.LevelInfo),
|
||||
}
|
||||
agentutil.Go(ctx, a.logger, func() {
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-a.hardCtx.Done():
|
||||
}
|
||||
_ = server.Close()
|
||||
})
|
||||
}()
|
||||
|
||||
apiServErr := server.Serve(apiListener)
|
||||
if apiServErr != nil && !xerrors.Is(apiServErr, http.ErrServerClosed) && !strings.Contains(apiServErr.Error(), "use of closed network connection") {
|
||||
@@ -1717,7 +1787,7 @@ func (a *agent) runCoordinator(ctx context.Context, tClient tailnetproto.DRPCTai
|
||||
coordination := ctrl.New(coordinate)
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
agentutil.Go(ctx, a.logger, func() {
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -1729,7 +1799,7 @@ func (a *agent) runCoordinator(ctx context.Context, tClient tailnetproto.DRPCTai
|
||||
case err := <-coordination.Wait():
|
||||
errCh <- err
|
||||
}
|
||||
})
|
||||
}()
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
@@ -1820,7 +1890,7 @@ func (a *agent) Collect(ctx context.Context, networkStats map[netlogtype.Connect
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
agentutil.Go(pingCtx, a.logger, func() {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
duration, p2p, _, err := a.network.Ping(pingCtx, addresses[0].Addr())
|
||||
if err != nil {
|
||||
@@ -1834,7 +1904,7 @@ func (a *agent) Collect(ctx context.Context, networkStats map[netlogtype.Connect
|
||||
} else {
|
||||
derpConns++
|
||||
}
|
||||
})
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
sort.Float64s(durations)
|
||||
@@ -2032,13 +2102,13 @@ func (a *agent) Close() error {
|
||||
|
||||
// Wait for the graceful shutdown to complete, but don't wait forever so
|
||||
// that we don't break user expectations.
|
||||
agentutil.Go(a.hardCtx, a.logger, func() {
|
||||
go func() {
|
||||
defer a.hardCancel()
|
||||
select {
|
||||
case <-a.hardCtx.Done():
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
// Wait for lifecycle to be reported
|
||||
lifecycleWaitLoop:
|
||||
@@ -2128,13 +2198,13 @@ const EnvAgentSubsystem = "CODER_AGENT_SUBSYSTEM"
|
||||
// eitherContext returns a context that is canceled when either context ends.
|
||||
func eitherContext(a, b context.Context) context.Context {
|
||||
ctx, cancel := context.WithCancel(a)
|
||||
agentutil.Go(ctx, slog.Logger{}, func() {
|
||||
go func() {
|
||||
defer cancel()
|
||||
select {
|
||||
case <-a.Done():
|
||||
case <-b.Done():
|
||||
}
|
||||
})
|
||||
}()
|
||||
return ctx
|
||||
}
|
||||
|
||||
|
||||
@@ -2843,6 +2843,102 @@ func TestAgent_Dial(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestAgent_PortForwardConnectionType verifies connection
|
||||
// type classification for forwarded TCP connections.
|
||||
func TestAgent_PortForwardConnectionType(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Start a TCP echo server for the "app" port.
|
||||
appListener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = appListener.Close() })
|
||||
appPort := appListener.Addr().(*net.TCPAddr).Port
|
||||
|
||||
// Start a TCP echo server for a non-app port.
|
||||
nonAppListener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = nonAppListener.Close() })
|
||||
nonAppPort := nonAppListener.Addr().(*net.TCPAddr).Port
|
||||
|
||||
echoOnce := func(l net.Listener) <-chan struct{} {
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
_, _ = io.Copy(c, c)
|
||||
}()
|
||||
return done
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
//nolint:dogsled
|
||||
agentConn, agentClient, _, _, _ := setupAgent(t, agentsdk.Manifest{
|
||||
Apps: []codersdk.WorkspaceApp{
|
||||
{
|
||||
ID: uuid.New(),
|
||||
Slug: "myapp",
|
||||
URL: fmt.Sprintf("http://localhost:%d", appPort),
|
||||
SharingLevel: codersdk.WorkspaceAppSharingLevelOwner,
|
||||
Health: codersdk.WorkspaceAppHealthDisabled,
|
||||
},
|
||||
},
|
||||
}, 0)
|
||||
require.True(t, agentConn.AwaitReachable(ctx))
|
||||
|
||||
// Phase 1: Connect to the app port, expect WORKSPACE_APP.
|
||||
appDone := echoOnce(appListener)
|
||||
conn, err := agentConn.DialContext(ctx, "tcp", appListener.Addr().String())
|
||||
require.NoError(t, err)
|
||||
testDial(ctx, t, conn)
|
||||
_ = conn.Close()
|
||||
<-appDone
|
||||
|
||||
var reports []*proto.ReportConnectionRequest
|
||||
require.Eventually(t, func() bool {
|
||||
reports = agentClient.GetConnectionReports()
|
||||
return len(reports) >= 2
|
||||
}, testutil.WaitMedium, testutil.IntervalFast,
|
||||
"waiting for 2 connection reports for workspace app",
|
||||
)
|
||||
|
||||
require.Equal(t, proto.Connection_CONNECT, reports[0].GetConnection().GetAction())
|
||||
require.Equal(t, proto.Connection_WORKSPACE_APP, reports[0].GetConnection().GetType())
|
||||
require.Equal(t, "myapp", reports[0].GetConnection().GetSlugOrPort())
|
||||
|
||||
require.Equal(t, proto.Connection_DISCONNECT, reports[1].GetConnection().GetAction())
|
||||
require.Equal(t, proto.Connection_WORKSPACE_APP, reports[1].GetConnection().GetType())
|
||||
require.Equal(t, "myapp", reports[1].GetConnection().GetSlugOrPort())
|
||||
|
||||
// Phase 2: Connect to the non-app port, expect PORT_FORWARDING.
|
||||
nonAppDone := echoOnce(nonAppListener)
|
||||
conn, err = agentConn.DialContext(ctx, "tcp", nonAppListener.Addr().String())
|
||||
require.NoError(t, err)
|
||||
testDial(ctx, t, conn)
|
||||
_ = conn.Close()
|
||||
<-nonAppDone
|
||||
|
||||
nonAppPortStr := strconv.Itoa(nonAppPort)
|
||||
require.Eventually(t, func() bool {
|
||||
reports = agentClient.GetConnectionReports()
|
||||
return len(reports) >= 4
|
||||
}, testutil.WaitMedium, testutil.IntervalFast,
|
||||
"waiting for 4 connection reports total",
|
||||
)
|
||||
|
||||
require.Equal(t, proto.Connection_CONNECT, reports[2].GetConnection().GetAction())
|
||||
require.Equal(t, proto.Connection_PORT_FORWARDING, reports[2].GetConnection().GetType())
|
||||
require.Equal(t, nonAppPortStr, reports[2].GetConnection().GetSlugOrPort())
|
||||
|
||||
require.Equal(t, proto.Connection_DISCONNECT, reports[3].GetConnection().GetAction())
|
||||
require.Equal(t, proto.Connection_PORT_FORWARDING, reports[3].GetConnection().GetType())
|
||||
require.Equal(t, nonAppPortStr, reports[3].GetConnection().GetSlugOrPort())
|
||||
}
|
||||
|
||||
// TestAgent_UpdatedDERP checks that agents can handle their DERP map being
|
||||
// updated, and that clients can also handle it.
|
||||
func TestAgent_UpdatedDERP(t *testing.T) {
|
||||
|
||||
Generated
+71
-2
@@ -1,9 +1,9 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI)
|
||||
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI,SubAgentClient)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
|
||||
//
|
||||
|
||||
// Package acmock is a generated GoMock package.
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
agentcontainers "github.com/coder/coder/v2/agent/agentcontainers"
|
||||
codersdk "github.com/coder/coder/v2/codersdk"
|
||||
uuid "github.com/google/uuid"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
@@ -216,3 +217,71 @@ func (mr *MockDevcontainerCLIMockRecorder) Up(ctx, workspaceFolder, configPath a
|
||||
varargs := append([]any{ctx, workspaceFolder, configPath}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockDevcontainerCLI)(nil).Up), varargs...)
|
||||
}
|
||||
|
||||
// MockSubAgentClient is a mock of SubAgentClient interface.
|
||||
type MockSubAgentClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockSubAgentClientMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockSubAgentClientMockRecorder is the mock recorder for MockSubAgentClient.
|
||||
type MockSubAgentClientMockRecorder struct {
|
||||
mock *MockSubAgentClient
|
||||
}
|
||||
|
||||
// NewMockSubAgentClient creates a new mock instance.
|
||||
func NewMockSubAgentClient(ctrl *gomock.Controller) *MockSubAgentClient {
|
||||
mock := &MockSubAgentClient{ctrl: ctrl}
|
||||
mock.recorder = &MockSubAgentClientMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockSubAgentClient) EXPECT() *MockSubAgentClientMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Create mocks base method.
|
||||
func (m *MockSubAgentClient) Create(ctx context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Create", ctx, agent)
|
||||
ret0, _ := ret[0].(agentcontainers.SubAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Create indicates an expected call of Create.
|
||||
func (mr *MockSubAgentClientMockRecorder) Create(ctx, agent any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockSubAgentClient)(nil).Create), ctx, agent)
|
||||
}
|
||||
|
||||
// Delete mocks base method.
|
||||
func (m *MockSubAgentClient) Delete(ctx context.Context, id uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Delete", ctx, id)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Delete indicates an expected call of Delete.
|
||||
func (mr *MockSubAgentClientMockRecorder) Delete(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockSubAgentClient)(nil).Delete), ctx, id)
|
||||
}
|
||||
|
||||
// List mocks base method.
|
||||
func (m *MockSubAgentClient) List(ctx context.Context) ([]agentcontainers.SubAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "List", ctx)
|
||||
ret0, _ := ret[0].([]agentcontainers.SubAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// List indicates an expected call of List.
|
||||
func (mr *MockSubAgentClientMockRecorder) List(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSubAgentClient)(nil).List), ctx)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package acmock contains a mock implementation of agentcontainers.Lister for use in tests.
|
||||
package acmock
|
||||
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers/ignore"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers/watcher"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
@@ -563,10 +562,7 @@ func (api *API) discoverDevcontainersInProject(projectPath string) error {
|
||||
api.broadcastUpdatesLocked()
|
||||
|
||||
if dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting {
|
||||
api.asyncWg.Add(1)
|
||||
agentutil.Go(api.ctx, api.logger, func() {
|
||||
defer api.asyncWg.Done()
|
||||
|
||||
api.asyncWg.Go(func() {
|
||||
_ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath)
|
||||
})
|
||||
}
|
||||
@@ -1424,9 +1420,9 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
|
||||
api.knownDevcontainers[dc.WorkspaceFolder] = dc
|
||||
api.broadcastUpdatesLocked()
|
||||
|
||||
agentutil.Go(ctx, api.logger, func() {
|
||||
go func() {
|
||||
_ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath, WithRemoveExistingContainer())
|
||||
})
|
||||
}()
|
||||
|
||||
api.mu.Unlock()
|
||||
|
||||
@@ -1628,16 +1624,25 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
|
||||
injected := make(map[uuid.UUID]bool, len(api.injectedSubAgentProcs))
|
||||
// Collect all subagent IDs that should be kept:
|
||||
// 1. Subagents currently tracked by injectedSubAgentProcs
|
||||
// 2. Subagents referenced by known devcontainers from the manifest
|
||||
var keep []uuid.UUID
|
||||
for _, proc := range api.injectedSubAgentProcs {
|
||||
injected[proc.agent.ID] = true
|
||||
keep = append(keep, proc.agent.ID)
|
||||
}
|
||||
for _, dc := range api.knownDevcontainers {
|
||||
if dc.SubagentID.Valid {
|
||||
keep = append(keep, dc.SubagentID.UUID)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, defaultOperationTimeout)
|
||||
defer cancel()
|
||||
|
||||
var errs []error
|
||||
for _, agent := range agents {
|
||||
if injected[agent.ID] {
|
||||
if slices.Contains(keep, agent.ID) {
|
||||
continue
|
||||
}
|
||||
client := *api.subAgentClient.Load()
|
||||
@@ -1648,10 +1653,11 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
|
||||
slog.F("agent_id", agent.ID),
|
||||
slog.F("agent_name", agent.Name),
|
||||
)
|
||||
errs = append(errs, xerrors.Errorf("delete agent %s (%s): %w", agent.Name, agent.ID, err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
// maybeInjectSubAgentIntoContainerLocked injects a subagent into a dev
|
||||
@@ -2002,7 +2008,20 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
// logger.Warn(ctx, "set CAP_NET_ADMIN on agent binary failed", slog.Error(err))
|
||||
// }
|
||||
|
||||
deleteSubAgent := proc.agent.ID != uuid.Nil && maybeRecreateSubAgent && !proc.agent.EqualConfig(subAgentConfig)
|
||||
// Only delete and recreate subagents that were dynamically created
|
||||
// (ID == uuid.Nil). Terraform-defined subagents (subAgentConfig.ID !=
|
||||
// uuid.Nil) must not be deleted because they have attached resources
|
||||
// managed by terraform.
|
||||
isTerraformManaged := subAgentConfig.ID != uuid.Nil
|
||||
configHasChanged := !proc.agent.EqualConfig(subAgentConfig)
|
||||
|
||||
logger.Debug(ctx, "checking if sub agent should be deleted",
|
||||
slog.F("is_terraform_managed", isTerraformManaged),
|
||||
slog.F("maybe_recreate_sub_agent", maybeRecreateSubAgent),
|
||||
slog.F("config_has_changed", configHasChanged),
|
||||
)
|
||||
|
||||
deleteSubAgent := !isTerraformManaged && maybeRecreateSubAgent && configHasChanged
|
||||
if deleteSubAgent {
|
||||
logger.Debug(ctx, "deleting existing subagent for recreation", slog.F("agent_id", proc.agent.ID))
|
||||
client := *api.subAgentClient.Load()
|
||||
@@ -2013,11 +2032,23 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
proc.agent = SubAgent{} // Clear agent to signal that we need to create a new one.
|
||||
}
|
||||
|
||||
if proc.agent.ID == uuid.Nil {
|
||||
logger.Debug(ctx, "creating new subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
// Re-create (upsert) terraform-managed subagents when the config
|
||||
// changes so that display apps and other settings are updated
|
||||
// without deleting the agent.
|
||||
recreateTerraformSubAgent := isTerraformManaged && maybeRecreateSubAgent && configHasChanged
|
||||
|
||||
if proc.agent.ID == uuid.Nil || recreateTerraformSubAgent {
|
||||
if recreateTerraformSubAgent {
|
||||
logger.Debug(ctx, "updating existing subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
} else {
|
||||
logger.Debug(ctx, "creating new subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
}
|
||||
|
||||
// Create new subagent record in the database to receive the auth token.
|
||||
// If we get a unique constraint violation, try with expanded names that
|
||||
|
||||
@@ -437,7 +437,11 @@ func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.S
|
||||
}
|
||||
}
|
||||
|
||||
agent.ID = uuid.New()
|
||||
// Only generate a new ID if one wasn't provided. Terraform-defined
|
||||
// subagents have pre-existing IDs that should be preserved.
|
||||
if agent.ID == uuid.Nil {
|
||||
agent.ID = uuid.New()
|
||||
}
|
||||
agent.AuthToken = uuid.New()
|
||||
if m.agents == nil {
|
||||
m.agents = make(map[uuid.UUID]agentcontainers.SubAgent)
|
||||
@@ -1035,6 +1039,30 @@ func TestAPI(t *testing.T) {
|
||||
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
|
||||
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
|
||||
},
|
||||
{
|
||||
name: "Terraform-defined devcontainer can be rebuilt",
|
||||
devcontainerID: devcontainerID1.String(),
|
||||
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerID1,
|
||||
Name: "test-devcontainer-terraform",
|
||||
WorkspaceFolder: workspaceFolder1,
|
||||
ConfigPath: configPath1,
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Container: &devContainer1,
|
||||
SubagentID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
},
|
||||
},
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{devContainer1},
|
||||
},
|
||||
arch: "<none>",
|
||||
},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
|
||||
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -1449,14 +1477,6 @@ func TestAPI(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
api := agentcontainers.NewAPI(logger, apiOpts...)
|
||||
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
var (
|
||||
agentRunningCh chan struct{}
|
||||
stopAgentCh chan struct{}
|
||||
@@ -1473,6 +1493,14 @@ func TestAPI(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
api := agentcontainers.NewAPI(logger, apiOpts...)
|
||||
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
@@ -2490,6 +2518,338 @@ func TestAPI(t *testing.T) {
|
||||
assert.Empty(t, fakeSAC.agents)
|
||||
})
|
||||
|
||||
t.Run("SubAgentCleanupPreservesTerraformDefined", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
// Given: A terraform-defined agent and devcontainer that should be preserved
|
||||
terraformAgentID = uuid.New()
|
||||
terraformAgentToken = uuid.New()
|
||||
terraformAgent = agentcontainers.SubAgent{
|
||||
ID: terraformAgentID,
|
||||
Name: "terraform-defined-agent",
|
||||
Directory: "/workspace",
|
||||
AuthToken: terraformAgentToken,
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
// Given: An orphaned agent that should be cleaned up
|
||||
orphanedAgentID = uuid.New()
|
||||
orphanedAgentToken = uuid.New()
|
||||
orphanedAgent = agentcontainers.SubAgent{
|
||||
ID: orphanedAgentID,
|
||||
Name: "orphaned-agent",
|
||||
Directory: "/tmp",
|
||||
AuthToken: orphanedAgentToken,
|
||||
}
|
||||
|
||||
ctx = testutil.Context(t, testutil.WaitMedium)
|
||||
logger = slog.Make()
|
||||
mClock = quartz.NewMock(t)
|
||||
mCCLI = acmock.NewMockContainerCLI(gomock.NewController(t))
|
||||
|
||||
fakeSAC = &fakeSubAgentClient{
|
||||
logger: logger.Named("fakeSubAgentClient"),
|
||||
agents: map[uuid.UUID]agentcontainers.SubAgent{
|
||||
terraformAgentID: terraformAgent,
|
||||
orphanedAgentID: orphanedAgent,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{},
|
||||
}, nil).AnyTimes()
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithClock(mClock),
|
||||
agentcontainers.WithContainerCLI(mCCLI),
|
||||
agentcontainers.WithSubAgentClient(fakeSAC),
|
||||
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
|
||||
agentcontainers.WithDevcontainers([]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer}, nil),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
// When: We advance the clock, allowing cleanup to occur
|
||||
_, aw := mClock.AdvanceNext()
|
||||
aw.MustWait(ctx)
|
||||
|
||||
// Then: The orphaned agent should be deleted
|
||||
assert.Contains(t, fakeSAC.deleted, orphanedAgentID, "orphaned agent should be deleted")
|
||||
|
||||
// And: The terraform-defined agent should not be deleted
|
||||
assert.NotContains(t, fakeSAC.deleted, terraformAgentID, "terraform-defined agent should be preserved")
|
||||
assert.Len(t, fakeSAC.agents, 1, "only terraform agent should remain")
|
||||
assert.Contains(t, fakeSAC.agents, terraformAgentID, "terraform agent should still exist")
|
||||
})
|
||||
|
||||
t.Run("TerraformDefinedSubAgentNotRecreatedOnConfigChange", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
|
||||
}
|
||||
|
||||
var (
|
||||
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
mCtrl = gomock.NewController(t)
|
||||
|
||||
// Given: A terraform-defined devcontainer with a pre-assigned subagent ID.
|
||||
terraformAgentID = uuid.New()
|
||||
terraformContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: "test-container-id",
|
||||
FriendlyName: "test-container",
|
||||
Image: "test-image",
|
||||
Running: true,
|
||||
CreatedAt: time.Now(),
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
fCCLI = &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
|
||||
},
|
||||
arch: runtime.GOARCH,
|
||||
}
|
||||
|
||||
fDCCLI = &fakeDevcontainerCLI{
|
||||
upID: terraformContainer.ID,
|
||||
readConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: []agentcontainers.CoderCustomization{{
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mSAC = acmock.NewMockSubAgentClient(mCtrl)
|
||||
closed bool
|
||||
)
|
||||
|
||||
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
|
||||
|
||||
// EXPECT: Create is called twice with the terraform-defined ID:
|
||||
// once for the initial creation and once after the rebuild with
|
||||
// config changes (upsert).
|
||||
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
assert.Equal(t, terraformAgentID, agent.ID, "agent should have terraform-defined ID")
|
||||
agent.AuthToken = uuid.New()
|
||||
return agent, nil
|
||||
},
|
||||
).Times(2)
|
||||
|
||||
// EXPECT: Delete may be called during Close, but not before.
|
||||
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
|
||||
assert.True(t, closed, "Delete should only be called after Close, not during recreation")
|
||||
return nil
|
||||
}).AnyTimes()
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithContainerCLI(fCCLI),
|
||||
agentcontainers.WithDevcontainerCLI(fDCCLI),
|
||||
agentcontainers.WithDevcontainers(
|
||||
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
|
||||
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
|
||||
),
|
||||
agentcontainers.WithSubAgentClient(mSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
|
||||
// Given: We create the devcontainer for the first time.
|
||||
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: The container is recreated (new container ID) with config changes.
|
||||
terraformContainer.ID = "new-container-id"
|
||||
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
|
||||
fDCCLI.upID = terraformContainer.ID
|
||||
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app2"}}, // Changed app triggers recreation logic.
|
||||
}}
|
||||
|
||||
err = api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath, agentcontainers.WithRemoveExistingContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: Mock expectations verify that Create was called once and Delete was not called during recreation.
|
||||
closed = true
|
||||
api.Close()
|
||||
})
|
||||
|
||||
// Verify that rebuilding a terraform-defined devcontainer via the
|
||||
// HTTP API does not delete the sub agent. The sub agent should be
|
||||
// preserved (Create called again with the same terraform ID) and
|
||||
// display app changes should be picked up.
|
||||
t.Run("TerraformDefinedSubAgentRebuildViaHTTP", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitMedium)
|
||||
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
mCtrl = gomock.NewController(t)
|
||||
|
||||
terraformAgentID = uuid.New()
|
||||
containerID = "test-container-id"
|
||||
|
||||
terraformContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: containerID,
|
||||
FriendlyName: "test-container",
|
||||
Image: "test-image",
|
||||
Running: true,
|
||||
CreatedAt: time.Now(),
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
fCCLI = &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
|
||||
},
|
||||
arch: runtime.GOARCH,
|
||||
}
|
||||
|
||||
fDCCLI = &fakeDevcontainerCLI{
|
||||
upID: containerID,
|
||||
readConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: []agentcontainers.CoderCustomization{{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppSSH: true,
|
||||
codersdk.DisplayAppWebTerminal: true,
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mSAC = acmock.NewMockSubAgentClient(mCtrl)
|
||||
closed bool
|
||||
|
||||
createCalled = make(chan agentcontainers.SubAgent, 2)
|
||||
)
|
||||
|
||||
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
|
||||
|
||||
// Create should be called twice: once for the initial injection
|
||||
// and once after the rebuild picks up the new container.
|
||||
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
assert.Equal(t, terraformAgentID, agent.ID, "agent should always use terraform-defined ID")
|
||||
agent.AuthToken = uuid.New()
|
||||
createCalled <- agent
|
||||
return agent, nil
|
||||
},
|
||||
).Times(2)
|
||||
|
||||
// Delete must only be called during Close, never during rebuild.
|
||||
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
|
||||
assert.True(t, closed, "Delete should only be called after Close, not during rebuild")
|
||||
return nil
|
||||
}).AnyTimes()
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithContainerCLI(fCCLI),
|
||||
agentcontainers.WithDevcontainerCLI(fDCCLI),
|
||||
agentcontainers.WithDevcontainers(
|
||||
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
|
||||
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
|
||||
),
|
||||
agentcontainers.WithSubAgentClient(mSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
defer func() {
|
||||
closed = true
|
||||
api.Close()
|
||||
}()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
// Perform the initial devcontainer creation directly to set up
|
||||
// the subagent (mirrors the TerraformDefinedSubAgentNotRecreatedOnConfigChange
|
||||
// test pattern).
|
||||
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
initialAgent := testutil.RequireReceive(ctx, t, createCalled)
|
||||
assert.Equal(t, terraformAgentID, initialAgent.ID)
|
||||
|
||||
// Simulate container rebuild: new container ID, changed display apps.
|
||||
newContainerID := "new-container-id"
|
||||
terraformContainer.ID = newContainerID
|
||||
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
|
||||
fDCCLI.upID = newContainerID
|
||||
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppSSH: true,
|
||||
codersdk.DisplayAppWebTerminal: true,
|
||||
codersdk.DisplayAppVSCodeDesktop: true,
|
||||
codersdk.DisplayAppVSCodeInsiders: true,
|
||||
},
|
||||
}}
|
||||
|
||||
// Issue the rebuild request via the HTTP API.
|
||||
req := httptest.NewRequest(http.MethodPost, "/devcontainers/"+terraformDevcontainer.ID.String()+"/recreate", nil).
|
||||
WithContext(ctx)
|
||||
rec := httptest.NewRecorder()
|
||||
r.ServeHTTP(rec, req)
|
||||
require.Equal(t, http.StatusAccepted, rec.Code)
|
||||
|
||||
// Wait for the post-rebuild injection to complete.
|
||||
rebuiltAgent := testutil.RequireReceive(ctx, t, createCalled)
|
||||
assert.Equal(t, terraformAgentID, rebuiltAgent.ID, "rebuilt agent should preserve terraform ID")
|
||||
|
||||
// Verify that the display apps were updated.
|
||||
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeDesktop,
|
||||
"rebuilt agent should include updated display apps")
|
||||
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeInsiders,
|
||||
"rebuilt agent should include updated display apps")
|
||||
})
|
||||
|
||||
t.Run("Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -24,10 +24,12 @@ type SubAgent struct {
|
||||
DisplayApps []codersdk.DisplayApp
|
||||
}
|
||||
|
||||
// CloneConfig makes a copy of SubAgent without ID and AuthToken. The
|
||||
// name is inherited from the devcontainer.
|
||||
// CloneConfig makes a copy of SubAgent using configuration from the
|
||||
// devcontainer. The ID is inherited from dc.SubagentID if present, and
|
||||
// the name is inherited from the devcontainer. AuthToken is not copied.
|
||||
func (s SubAgent) CloneConfig(dc codersdk.WorkspaceAgentDevcontainer) SubAgent {
|
||||
return SubAgent{
|
||||
ID: dc.SubagentID.UUID,
|
||||
Name: dc.Name,
|
||||
Directory: s.Directory,
|
||||
Architecture: s.Architecture,
|
||||
@@ -190,6 +192,11 @@ func (a *subAgentAPIClient) List(ctx context.Context) ([]SubAgent, error) {
|
||||
func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAgent, err error) {
|
||||
a.logger.Debug(ctx, "creating sub agent", slog.F("name", agent.Name), slog.F("directory", agent.Directory))
|
||||
|
||||
var id []byte
|
||||
if agent.ID != uuid.Nil {
|
||||
id = agent.ID[:]
|
||||
}
|
||||
|
||||
displayApps := make([]agentproto.CreateSubAgentRequest_DisplayApp, 0, len(agent.DisplayApps))
|
||||
for _, displayApp := range agent.DisplayApps {
|
||||
var app agentproto.CreateSubAgentRequest_DisplayApp
|
||||
@@ -228,6 +235,7 @@ func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAg
|
||||
OperatingSystem: agent.OperatingSystem,
|
||||
DisplayApps: displayApps,
|
||||
Apps: apps,
|
||||
Id: id,
|
||||
})
|
||||
if err != nil {
|
||||
return SubAgent{}, err
|
||||
|
||||
@@ -306,3 +306,128 @@ func TestSubAgentClient_CreateWithDisplayApps(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubAgent_CloneConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CopiesIDFromDevcontainer", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
subAgent := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "original-name",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
|
||||
}
|
||||
expectedID := uuid.MustParse("550e8400-e29b-41d4-a716-446655440000")
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
Name: "devcontainer-name",
|
||||
SubagentID: uuid.NullUUID{UUID: expectedID, Valid: true},
|
||||
}
|
||||
|
||||
cloned := subAgent.CloneConfig(dc)
|
||||
|
||||
assert.Equal(t, expectedID, cloned.ID)
|
||||
assert.Equal(t, dc.Name, cloned.Name)
|
||||
assert.Equal(t, subAgent.Directory, cloned.Directory)
|
||||
assert.Zero(t, cloned.AuthToken, "AuthToken should not be copied")
|
||||
})
|
||||
|
||||
t.Run("HandlesNilSubagentID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
subAgent := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "original-name",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
}
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
Name: "devcontainer-name",
|
||||
SubagentID: uuid.NullUUID{Valid: false},
|
||||
}
|
||||
|
||||
cloned := subAgent.CloneConfig(dc)
|
||||
|
||||
assert.Equal(t, uuid.Nil, cloned.ID)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubAgent_EqualConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
base := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "test-agent",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
|
||||
Apps: []agentcontainers.SubAgentApp{
|
||||
{Slug: "test-app", DisplayName: "Test App"},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
modify func(*agentcontainers.SubAgent)
|
||||
wantEqual bool
|
||||
}{
|
||||
{
|
||||
name: "identical",
|
||||
modify: func(s *agentcontainers.SubAgent) {},
|
||||
wantEqual: true,
|
||||
},
|
||||
{
|
||||
name: "different ID",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.ID = uuid.New() },
|
||||
wantEqual: true,
|
||||
},
|
||||
{
|
||||
name: "different Name",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Name = "different-name" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Directory",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Directory = "/different/path" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Architecture",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Architecture = "arm64" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different OperatingSystem",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.OperatingSystem = "windows" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different DisplayApps",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.DisplayApps = []codersdk.DisplayApp{codersdk.DisplayAppSSH} },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Apps",
|
||||
modify: func(s *agentcontainers.SubAgent) {
|
||||
s.Apps = []agentcontainers.SubAgentApp{{Slug: "different-app", DisplayName: "Different App"}}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
modified := base
|
||||
tt.modify(&modified)
|
||||
assert.Equal(t, tt.wantEqual, base.EqualConfig(modified))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -474,10 +473,10 @@ func (r *Runner) trackCommandGoroutine(fn func()) error {
|
||||
return xerrors.New("track command goroutine: closed")
|
||||
}
|
||||
r.cmdCloseWait.Add(1)
|
||||
agentutil.Go(r.cronCtx, r.Logger, func() {
|
||||
go func() {
|
||||
defer r.cmdCloseWait.Done()
|
||||
fn()
|
||||
})
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -99,7 +99,10 @@ func (c *Client) SyncReady(ctx context.Context, unitName unit.ID) (bool, error)
|
||||
resp, err := c.client.SyncReady(ctx, &proto.SyncReadyRequest{
|
||||
Unit: string(unitName),
|
||||
})
|
||||
return resp.Ready, err
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("sync ready: %w", err)
|
||||
}
|
||||
return resp.Ready, nil
|
||||
}
|
||||
|
||||
// SyncStatus gets the status of a unit and its dependencies.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.23.4
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc v6.33.1
|
||||
// source: agent/agentsocket/proto/agentsocket.proto
|
||||
|
||||
package proto
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -21,18 +22,16 @@ const (
|
||||
)
|
||||
|
||||
type PingRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *PingRequest) Reset() {
|
||||
*x = PingRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *PingRequest) String() string {
|
||||
@@ -43,7 +42,7 @@ func (*PingRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PingRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -59,18 +58,16 @@ func (*PingRequest) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type PingResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *PingResponse) Reset() {
|
||||
*x = PingResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *PingResponse) String() string {
|
||||
@@ -81,7 +78,7 @@ func (*PingResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PingResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -97,20 +94,17 @@ func (*PingResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type SyncStartRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncStartRequest) Reset() {
|
||||
*x = SyncStartRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncStartRequest) String() string {
|
||||
@@ -121,7 +115,7 @@ func (*SyncStartRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStartRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -144,18 +138,16 @@ func (x *SyncStartRequest) GetUnit() string {
|
||||
}
|
||||
|
||||
type SyncStartResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncStartResponse) Reset() {
|
||||
*x = SyncStartResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncStartResponse) String() string {
|
||||
@@ -166,7 +158,7 @@ func (*SyncStartResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStartResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -182,21 +174,18 @@ func (*SyncStartResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type SyncWantRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) Reset() {
|
||||
*x = SyncWantRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) String() string {
|
||||
@@ -207,7 +196,7 @@ func (*SyncWantRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncWantRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -237,18 +226,16 @@ func (x *SyncWantRequest) GetDependsOn() string {
|
||||
}
|
||||
|
||||
type SyncWantResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncWantResponse) Reset() {
|
||||
*x = SyncWantResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncWantResponse) String() string {
|
||||
@@ -259,7 +246,7 @@ func (*SyncWantResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncWantResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -275,20 +262,17 @@ func (*SyncWantResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type SyncCompleteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncCompleteRequest) Reset() {
|
||||
*x = SyncCompleteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncCompleteRequest) String() string {
|
||||
@@ -299,7 +283,7 @@ func (*SyncCompleteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncCompleteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -322,18 +306,16 @@ func (x *SyncCompleteRequest) GetUnit() string {
|
||||
}
|
||||
|
||||
type SyncCompleteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncCompleteResponse) Reset() {
|
||||
*x = SyncCompleteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncCompleteResponse) String() string {
|
||||
@@ -344,7 +326,7 @@ func (*SyncCompleteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncCompleteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -360,20 +342,17 @@ func (*SyncCompleteResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type SyncReadyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncReadyRequest) Reset() {
|
||||
*x = SyncReadyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncReadyRequest) String() string {
|
||||
@@ -384,7 +363,7 @@ func (*SyncReadyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncReadyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -407,20 +386,17 @@ func (x *SyncReadyRequest) GetUnit() string {
|
||||
}
|
||||
|
||||
type SyncReadyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncReadyResponse) Reset() {
|
||||
*x = SyncReadyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncReadyResponse) String() string {
|
||||
@@ -431,7 +407,7 @@ func (*SyncReadyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncReadyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -454,20 +430,17 @@ func (x *SyncReadyResponse) GetReady() bool {
|
||||
}
|
||||
|
||||
type SyncStatusRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncStatusRequest) Reset() {
|
||||
*x = SyncStatusRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncStatusRequest) String() string {
|
||||
@@ -478,7 +451,7 @@ func (*SyncStatusRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStatusRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -501,24 +474,21 @@ func (x *SyncStatusRequest) GetUnit() string {
|
||||
}
|
||||
|
||||
type DependencyInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
RequiredStatus string `protobuf:"bytes,3,opt,name=required_status,json=requiredStatus,proto3" json:"required_status,omitempty"`
|
||||
CurrentStatus string `protobuf:"bytes,4,opt,name=current_status,json=currentStatus,proto3" json:"current_status,omitempty"`
|
||||
IsSatisfied bool `protobuf:"varint,5,opt,name=is_satisfied,json=isSatisfied,proto3" json:"is_satisfied,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
RequiredStatus string `protobuf:"bytes,3,opt,name=required_status,json=requiredStatus,proto3" json:"required_status,omitempty"`
|
||||
CurrentStatus string `protobuf:"bytes,4,opt,name=current_status,json=currentStatus,proto3" json:"current_status,omitempty"`
|
||||
IsSatisfied bool `protobuf:"varint,5,opt,name=is_satisfied,json=isSatisfied,proto3" json:"is_satisfied,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) Reset() {
|
||||
*x = DependencyInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) String() string {
|
||||
@@ -529,7 +499,7 @@ func (*DependencyInfo) ProtoMessage() {}
|
||||
|
||||
func (x *DependencyInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -580,22 +550,19 @@ func (x *DependencyInfo) GetIsSatisfied() bool {
|
||||
}
|
||||
|
||||
type SyncStatusResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
|
||||
IsReady bool `protobuf:"varint,2,opt,name=is_ready,json=isReady,proto3" json:"is_ready,omitempty"`
|
||||
Dependencies []*DependencyInfo `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
|
||||
IsReady bool `protobuf:"varint,2,opt,name=is_ready,json=isReady,proto3" json:"is_ready,omitempty"`
|
||||
Dependencies []*DependencyInfo `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) Reset() {
|
||||
*x = SyncStatusResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) String() string {
|
||||
@@ -606,7 +573,7 @@ func (*SyncStatusResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStatusResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@@ -644,111 +611,62 @@ func (x *SyncStatusResponse) GetDependencies() []*DependencyInfo {
|
||||
|
||||
var File_agent_agentsocket_proto_agentsocket_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_rawDesc = []byte{
|
||||
0x0a, 0x29, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73,
|
||||
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x63, 0x6f, 0x64,
|
||||
0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76,
|
||||
0x31, 0x22, 0x0d, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x26, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63,
|
||||
0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x0a,
|
||||
0x0f, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f,
|
||||
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
|
||||
0x73, 0x4f, 0x6e, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x43,
|
||||
0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
|
||||
0x69, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x10, 0x53, 0x79,
|
||||
0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
|
||||
0x69, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x22, 0x27, 0x0a,
|
||||
0x11, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x70, 0x65, 0x6e,
|
||||
0x64, 0x65, 0x6e, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
|
||||
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6e, 0x12, 0x27, 0x0a, 0x0f,
|
||||
0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x53,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
|
||||
0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63,
|
||||
0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c,
|
||||
0x69, 0x73, 0x5f, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x22,
|
||||
0x91, 0x01, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19,
|
||||
0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x70,
|
||||
0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
|
||||
0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
|
||||
0x69, 0x65, 0x73, 0x32, 0xbb, 0x04, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x21, 0x2e, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
|
||||
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
|
||||
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12,
|
||||
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x59, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63,
|
||||
0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e,
|
||||
0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57,
|
||||
0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0c, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x29, 0x2e, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61,
|
||||
0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79,
|
||||
0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12,
|
||||
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x5f, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27,
|
||||
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
|
||||
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x61,
|
||||
0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_agent_agentsocket_proto_agentsocket_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
")agent/agentsocket/proto/agentsocket.proto\x12\x14coder.agentsocket.v1\"\r\n" +
|
||||
"\vPingRequest\"\x0e\n" +
|
||||
"\fPingResponse\"&\n" +
|
||||
"\x10SyncStartRequest\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\"\x13\n" +
|
||||
"\x11SyncStartResponse\"D\n" +
|
||||
"\x0fSyncWantRequest\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\x12\x1d\n" +
|
||||
"\n" +
|
||||
"depends_on\x18\x02 \x01(\tR\tdependsOn\"\x12\n" +
|
||||
"\x10SyncWantResponse\")\n" +
|
||||
"\x13SyncCompleteRequest\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\"\x16\n" +
|
||||
"\x14SyncCompleteResponse\"&\n" +
|
||||
"\x10SyncReadyRequest\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\")\n" +
|
||||
"\x11SyncReadyResponse\x12\x14\n" +
|
||||
"\x05ready\x18\x01 \x01(\bR\x05ready\"'\n" +
|
||||
"\x11SyncStatusRequest\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\"\xb6\x01\n" +
|
||||
"\x0eDependencyInfo\x12\x12\n" +
|
||||
"\x04unit\x18\x01 \x01(\tR\x04unit\x12\x1d\n" +
|
||||
"\n" +
|
||||
"depends_on\x18\x02 \x01(\tR\tdependsOn\x12'\n" +
|
||||
"\x0frequired_status\x18\x03 \x01(\tR\x0erequiredStatus\x12%\n" +
|
||||
"\x0ecurrent_status\x18\x04 \x01(\tR\rcurrentStatus\x12!\n" +
|
||||
"\fis_satisfied\x18\x05 \x01(\bR\visSatisfied\"\x91\x01\n" +
|
||||
"\x12SyncStatusResponse\x12\x16\n" +
|
||||
"\x06status\x18\x01 \x01(\tR\x06status\x12\x19\n" +
|
||||
"\bis_ready\x18\x02 \x01(\bR\aisReady\x12H\n" +
|
||||
"\fdependencies\x18\x03 \x03(\v2$.coder.agentsocket.v1.DependencyInfoR\fdependencies2\xbb\x04\n" +
|
||||
"\vAgentSocket\x12M\n" +
|
||||
"\x04Ping\x12!.coder.agentsocket.v1.PingRequest\x1a\".coder.agentsocket.v1.PingResponse\x12\\\n" +
|
||||
"\tSyncStart\x12&.coder.agentsocket.v1.SyncStartRequest\x1a'.coder.agentsocket.v1.SyncStartResponse\x12Y\n" +
|
||||
"\bSyncWant\x12%.coder.agentsocket.v1.SyncWantRequest\x1a&.coder.agentsocket.v1.SyncWantResponse\x12e\n" +
|
||||
"\fSyncComplete\x12).coder.agentsocket.v1.SyncCompleteRequest\x1a*.coder.agentsocket.v1.SyncCompleteResponse\x12\\\n" +
|
||||
"\tSyncReady\x12&.coder.agentsocket.v1.SyncReadyRequest\x1a'.coder.agentsocket.v1.SyncReadyResponse\x12_\n" +
|
||||
"\n" +
|
||||
"SyncStatus\x12'.coder.agentsocket.v1.SyncStatusRequest\x1a(.coder.agentsocket.v1.SyncStatusResponseB3Z1github.com/coder/coder/v2/agent/agentsocket/protob\x06proto3"
|
||||
|
||||
var (
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce sync.Once
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = file_agent_agentsocket_proto_agentsocket_proto_rawDesc
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP() []byte {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce.Do(func() {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_agentsocket_proto_agentsocket_proto_rawDescData)
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agent_agentsocket_proto_agentsocket_proto_rawDesc), len(file_agent_agentsocket_proto_agentsocket_proto_rawDesc)))
|
||||
})
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_goTypes = []interface{}{
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_goTypes = []any{
|
||||
(*PingRequest)(nil), // 0: coder.agentsocket.v1.PingRequest
|
||||
(*PingResponse)(nil), // 1: coder.agentsocket.v1.PingResponse
|
||||
(*SyncStartRequest)(nil), // 2: coder.agentsocket.v1.SyncStartRequest
|
||||
@@ -789,169 +707,11 @@ func file_agent_agentsocket_proto_agentsocket_proto_init() {
|
||||
if File_agent_agentsocket_proto_agentsocket_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PingRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PingResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStartRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStartResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncWantRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncWantResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncCompleteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncCompleteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncReadyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncReadyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStatusRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DependencyInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStatusResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_agent_agentsocket_proto_agentsocket_proto_rawDesc,
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_agent_agentsocket_proto_agentsocket_proto_rawDesc), len(file_agent_agentsocket_proto_agentsocket_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 13,
|
||||
NumExtensions: 0,
|
||||
@@ -962,7 +722,6 @@ func file_agent_agentsocket_proto_agentsocket_proto_init() {
|
||||
MessageInfos: file_agent_agentsocket_proto_agentsocket_proto_msgTypes,
|
||||
}.Build()
|
||||
File_agent_agentsocket_proto_agentsocket_proto = out.File
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDesc = nil
|
||||
file_agent_agentsocket_proto_agentsocket_proto_goTypes = nil
|
||||
file_agent_agentsocket_proto_agentsocket_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentsocket/proto"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/unit"
|
||||
"github.com/coder/coder/v2/codersdk/drpcsdk"
|
||||
)
|
||||
@@ -80,10 +79,10 @@ func NewServer(logger slog.Logger, opts ...Option) (*Server, error) {
|
||||
server.logger.Info(server.ctx, "agent socket server started", slog.F("path", server.path))
|
||||
|
||||
server.wg.Add(1)
|
||||
agentutil.Go(server.ctx, server.logger, func() {
|
||||
go func() {
|
||||
defer server.wg.Done()
|
||||
server.acceptConnections()
|
||||
})
|
||||
}()
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
+10
-11
@@ -29,7 +29,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentrsa"
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
@@ -635,13 +634,13 @@ func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, mag
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "stdin_pipe").Add(1)
|
||||
return xerrors.Errorf("create stdin pipe: %w", err)
|
||||
}
|
||||
agentutil.Go(session.Context(), logger, func() {
|
||||
go func() {
|
||||
_, err := io.Copy(stdinPipe, session)
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "stdin_io_copy").Add(1)
|
||||
}
|
||||
_ = stdinPipe.Close()
|
||||
})
|
||||
}()
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "start_command").Add(1)
|
||||
@@ -663,11 +662,11 @@ func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, mag
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
agentutil.Go(session.Context(), logger, func() {
|
||||
go func() {
|
||||
for sig := range sigs {
|
||||
handleSignal(logger, sig, cmd.Process, s.metrics, magicTypeLabel)
|
||||
}
|
||||
})
|
||||
}()
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
@@ -738,7 +737,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
for {
|
||||
if sigs == nil && windowSize == nil {
|
||||
return
|
||||
@@ -765,14 +764,14 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
_, err := io.Copy(ptty.InputWriter(), session)
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "input_io_copy").Add(1)
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
// We need to wait for the command output to finish copying. It's safe to
|
||||
// just do this copy on the main handler goroutine because one of two things
|
||||
@@ -1214,11 +1213,11 @@ func (s *Server) Close() error {
|
||||
// but Close() may not have completed.
|
||||
func (s *Server) Shutdown(ctx context.Context) error {
|
||||
ch := make(chan error, 1)
|
||||
agentutil.Go(ctx, s.logger, func() {
|
||||
go func() {
|
||||
// TODO(mafredri): Implement shutdown, SIGHUP running commands, etc.
|
||||
// For now we just close the server.
|
||||
ch <- s.Close()
|
||||
})
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
||||
@@ -4,9 +4,6 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
)
|
||||
|
||||
// Bicopy copies all of the data between the two connections and will close them
|
||||
@@ -38,10 +35,10 @@ func Bicopy(ctx context.Context, c1, c2 io.ReadWriteCloser) {
|
||||
|
||||
// Convert waitgroup to a channel so we can also wait on the context.
|
||||
done := make(chan struct{})
|
||||
agentutil.Go(ctx, slog.Logger{}, func() {
|
||||
go func() {
|
||||
defer close(done)
|
||||
wg.Wait()
|
||||
})
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
)
|
||||
|
||||
// streamLocalForwardPayload describes the extra data sent in a
|
||||
@@ -131,11 +130,11 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
log.Debug(ctx, "SSH unix forward added to cache")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
agentutil.Go(ctx, log, func() {
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
_ = ln.Close()
|
||||
})
|
||||
agentutil.Go(ctx, log, func() {
|
||||
}()
|
||||
go func() {
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
@@ -153,7 +152,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
SocketPath: addr,
|
||||
})
|
||||
|
||||
agentutil.Go(ctx, log, func() {
|
||||
go func() {
|
||||
ch, reqs, err := conn.OpenChannel("forwarded-streamlocal@openssh.com", payload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "open SSH unix forward channel to client", slog.Error(err))
|
||||
@@ -162,7 +161,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
}
|
||||
go gossh.DiscardRequests(reqs)
|
||||
Bicopy(ctx, ch, c)
|
||||
})
|
||||
}()
|
||||
}
|
||||
|
||||
h.Lock()
|
||||
@@ -172,7 +171,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
h.Unlock()
|
||||
log.Debug(ctx, "SSH unix forward listener removed from cache")
|
||||
_ = ln.Close()
|
||||
})
|
||||
}()
|
||||
|
||||
return true, nil
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -123,10 +122,10 @@ func (x *x11Forwarder) x11Handler(sshCtx ssh.Context, sshSession ssh.Session) (d
|
||||
}
|
||||
|
||||
// clean up the X11 session if the SSH session completes.
|
||||
agentutil.Go(ctx, x.logger, func() {
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
x.closeAndRemoveSession(x11session)
|
||||
})
|
||||
}()
|
||||
|
||||
go x.listenForConnections(ctx, x11session, serverConn, x11)
|
||||
x.logger.Debug(ctx, "X11 forwarding started", slog.F("display", x11session.display))
|
||||
@@ -207,10 +206,10 @@ func (x *x11Forwarder) listenForConnections(
|
||||
_ = conn.Close()
|
||||
continue
|
||||
}
|
||||
agentutil.Go(ctx, x.logger, func() {
|
||||
go func() {
|
||||
defer x.trackConn(conn, false)
|
||||
Bicopy(ctx, conn, channel)
|
||||
})
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -131,6 +131,7 @@ func TestServer_X11(t *testing.T) {
|
||||
|
||||
func TestServer_X11_EvictionLRU(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Skip("Flaky test, times out in CI")
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("X11 forwarding is only supported on Linux")
|
||||
}
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
package agentutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime/debug"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
// Go runs the provided function in a goroutine, recovering from panics and
|
||||
// logging them before re-panicking.
|
||||
func Go(ctx context.Context, log slog.Logger, fn func()) {
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Critical(ctx, "panic in goroutine",
|
||||
slog.F("panic", r),
|
||||
slog.F("stack", string(debug.Stack())),
|
||||
)
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
fn()
|
||||
}()
|
||||
}
|
||||
+2
-3
@@ -10,7 +10,6 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/quartz"
|
||||
@@ -70,7 +69,7 @@ func NewAppHealthReporterWithClock(
|
||||
continue
|
||||
}
|
||||
app := nextApp
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
_ = clk.TickerFunc(ctx, time.Duration(app.Healthcheck.Interval)*time.Second, func() error {
|
||||
// We time out at the healthcheck interval to prevent getting too backed up, but
|
||||
// set it 1ms early so that it's not simultaneous with the next tick in testing,
|
||||
@@ -134,7 +133,7 @@ func NewAppHealthReporterWithClock(
|
||||
}
|
||||
return nil
|
||||
}, "healthcheck", app.Slug)
|
||||
})
|
||||
}()
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/boundarylogproxy/codec"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
)
|
||||
@@ -134,11 +133,11 @@ func (s *Server) handleConnection(ctx context.Context, conn net.Conn) {
|
||||
defer cancel()
|
||||
|
||||
s.wg.Add(1)
|
||||
agentutil.Go(ctx, s.logger, func() {
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
<-ctx.Done()
|
||||
_ = conn.Close()
|
||||
})
|
||||
}()
|
||||
|
||||
// This is intended to be a sane starting point for the read buffer size. It may be
|
||||
// grown by codec.ReadFrame if necessary.
|
||||
|
||||
+954
-2224
File diff suppressed because it is too large
Load Diff
@@ -364,6 +364,8 @@ message Connection {
|
||||
VSCODE = 2;
|
||||
JETBRAINS = 3;
|
||||
RECONNECTING_PTY = 4;
|
||||
WORKSPACE_APP = 5;
|
||||
PORT_FORWARDING = 6;
|
||||
}
|
||||
|
||||
bytes id = 1;
|
||||
@@ -373,6 +375,7 @@ message Connection {
|
||||
string ip = 5;
|
||||
int32 status_code = 6;
|
||||
optional string reason = 7;
|
||||
optional string slug_or_port = 8;
|
||||
}
|
||||
|
||||
message ReportConnectionRequest {
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/go-reap"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
type Option func(o *options)
|
||||
@@ -34,8 +36,15 @@ func WithCatchSignals(sigs ...os.Signal) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithLogger(logger slog.Logger) Option {
|
||||
return func(o *options) {
|
||||
o.Logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
ExecArgs []string
|
||||
PIDs reap.PidCh
|
||||
CatchSignals []os.Signal
|
||||
Logger slog.Logger
|
||||
}
|
||||
|
||||
@@ -3,12 +3,15 @@
|
||||
package reaper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/go-reap"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
// IsInitProcess returns true if the current process's PID is 1.
|
||||
@@ -16,7 +19,7 @@ func IsInitProcess() bool {
|
||||
return os.Getpid() == 1
|
||||
}
|
||||
|
||||
func catchSignals(pid int, sigs []os.Signal) {
|
||||
func catchSignals(logger slog.Logger, pid int, sigs []os.Signal) {
|
||||
if len(sigs) == 0 {
|
||||
return
|
||||
}
|
||||
@@ -25,10 +28,19 @@ func catchSignals(pid int, sigs []os.Signal) {
|
||||
signal.Notify(sc, sigs...)
|
||||
defer signal.Stop(sc)
|
||||
|
||||
logger.Info(context.Background(), "reaper catching signals",
|
||||
slog.F("signals", sigs),
|
||||
slog.F("child_pid", pid),
|
||||
)
|
||||
|
||||
for {
|
||||
s := <-sc
|
||||
sig, ok := s.(syscall.Signal)
|
||||
if ok {
|
||||
logger.Info(context.Background(), "reaper caught signal, killing child process",
|
||||
slog.F("signal", sig.String()),
|
||||
slog.F("child_pid", pid),
|
||||
)
|
||||
_ = syscall.Kill(pid, sig)
|
||||
}
|
||||
}
|
||||
@@ -78,7 +90,7 @@ func ForkReap(opt ...Option) (int, error) {
|
||||
return 1, xerrors.Errorf("fork exec: %w", err)
|
||||
}
|
||||
|
||||
go catchSignals(pid, opts.CatchSignals)
|
||||
go catchSignals(opts.Logger, pid, opts.CatchSignals)
|
||||
|
||||
var wstatus syscall.WaitStatus
|
||||
_, err = syscall.Wait4(pid, &wstatus, 0, nil)
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
)
|
||||
|
||||
@@ -77,7 +76,7 @@ func newBuffered(ctx context.Context, logger slog.Logger, execer agentexec.Exece
|
||||
// We do not need to separately monitor for the process exiting. When it
|
||||
// exits, our ptty.OutputReader() will return EOF after reading all process
|
||||
// output.
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
buffer := make([]byte, 1024)
|
||||
for {
|
||||
read, err := ptty.OutputReader().Read(buffer)
|
||||
@@ -119,7 +118,7 @@ func newBuffered(ctx context.Context, logger slog.Logger, execer agentexec.Exece
|
||||
}
|
||||
rpty.state.cond.L.Unlock()
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
return rpty
|
||||
}
|
||||
@@ -134,7 +133,7 @@ func (rpty *bufferedReconnectingPTY) lifecycle(ctx context.Context, logger slog.
|
||||
logger.Debug(ctx, "reconnecting pty ready")
|
||||
rpty.state.setState(StateReady, nil)
|
||||
|
||||
state, reasonErr := rpty.state.waitForStateOrContext(ctx, StateClosing, logger)
|
||||
state, reasonErr := rpty.state.waitForStateOrContext(ctx, StateClosing)
|
||||
if state < StateClosing {
|
||||
// If we have not closed yet then the context is what unblocked us (which
|
||||
// means the agent is shutting down) so move into the closing phase.
|
||||
@@ -191,7 +190,7 @@ func (rpty *bufferedReconnectingPTY) Attach(ctx context.Context, connID string,
|
||||
delete(rpty.activeConns, connID)
|
||||
}()
|
||||
|
||||
state, err := rpty.state.waitForStateOrContext(ctx, StateReady, logger)
|
||||
state, err := rpty.state.waitForStateOrContext(ctx, StateReady)
|
||||
if state != StateReady {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
)
|
||||
@@ -178,20 +177,20 @@ func (s *ptyState) waitForState(state State) (State, error) {
|
||||
|
||||
// waitForStateOrContext blocks until the state or a greater one is reached or
|
||||
// the provided context ends.
|
||||
func (s *ptyState) waitForStateOrContext(ctx context.Context, state State, logger slog.Logger) (State, error) {
|
||||
func (s *ptyState) waitForStateOrContext(ctx context.Context, state State) (State, error) {
|
||||
s.cond.L.Lock()
|
||||
defer s.cond.L.Unlock()
|
||||
|
||||
nevermind := make(chan struct{})
|
||||
defer close(nevermind)
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Wake up when the context ends.
|
||||
s.cond.Broadcast()
|
||||
case <-nevermind:
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
for ctx.Err() == nil && state > s.state {
|
||||
s.cond.Wait()
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
)
|
||||
|
||||
@@ -142,7 +141,7 @@ func (rpty *screenReconnectingPTY) lifecycle(ctx context.Context, logger slog.Lo
|
||||
logger.Debug(ctx, "reconnecting pty ready")
|
||||
rpty.state.setState(StateReady, nil)
|
||||
|
||||
state, reasonErr := rpty.state.waitForStateOrContext(ctx, StateClosing, logger)
|
||||
state, reasonErr := rpty.state.waitForStateOrContext(ctx, StateClosing)
|
||||
if state < StateClosing {
|
||||
// If we have not closed yet then the context is what unblocked us (which
|
||||
// means the agent is shutting down) so move into the closing phase.
|
||||
@@ -167,7 +166,7 @@ func (rpty *screenReconnectingPTY) Attach(ctx context.Context, _ string, conn ne
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
state, err := rpty.state.waitForStateOrContext(ctx, StateReady, logger)
|
||||
state, err := rpty.state.waitForStateOrContext(ctx, StateReady)
|
||||
if state != StateReady {
|
||||
return err
|
||||
}
|
||||
@@ -257,7 +256,7 @@ func (rpty *screenReconnectingPTY) doAttach(ctx context.Context, conn net.Conn,
|
||||
// We do not need to separately monitor for the process exiting. When it
|
||||
// exits, our ptty.OutputReader() will return EOF after reading all process
|
||||
// output.
|
||||
agentutil.Go(ctx, logger, func() {
|
||||
go func() {
|
||||
defer versionCancel()
|
||||
defer func() {
|
||||
err := conn.Close()
|
||||
@@ -299,7 +298,7 @@ func (rpty *screenReconnectingPTY) doAttach(ctx context.Context, conn net.Conn,
|
||||
break
|
||||
}
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
// Version seems to be the only command without a side effect (other than
|
||||
// making the version pop up briefly) so use it to wait for the session to
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
@@ -91,7 +90,7 @@ func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr err
|
||||
wg.Add(1)
|
||||
disconnected := s.reportConnection(uuid.New(), remoteAddrString)
|
||||
closed := make(chan struct{})
|
||||
agentutil.Go(ctx, clog, func() {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
select {
|
||||
case <-closed:
|
||||
@@ -99,9 +98,9 @@ func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr err
|
||||
disconnected(1, "server shut down")
|
||||
_ = conn.Close()
|
||||
}
|
||||
})
|
||||
}()
|
||||
wg.Add(1)
|
||||
agentutil.Go(ctx, clog, func() {
|
||||
go func() {
|
||||
defer close(closed)
|
||||
defer wg.Done()
|
||||
err := s.handleConn(ctx, clog, conn)
|
||||
@@ -114,7 +113,7 @@ func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr err
|
||||
} else {
|
||||
disconnected(0, "")
|
||||
}
|
||||
})
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
return retErr
|
||||
@@ -227,18 +226,18 @@ func (s *Server) handleConn(ctx context.Context, logger slog.Logger, conn net.Co
|
||||
)
|
||||
|
||||
done := make(chan struct{})
|
||||
agentutil.Go(ctx, connLogger, func() {
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
rpty.Close(ctx.Err())
|
||||
}
|
||||
})
|
||||
}()
|
||||
|
||||
agentutil.Go(ctx, connLogger, func() {
|
||||
go func() {
|
||||
rpty.Wait()
|
||||
s.reconnectingPTYs.Delete(msg.ID)
|
||||
})
|
||||
}()
|
||||
|
||||
connected = true
|
||||
sendConnected <- rpty
|
||||
|
||||
+2
-3
@@ -10,7 +10,6 @@ import (
|
||||
"tailscale.com/types/netlogtype"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/agent/agentutil"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
)
|
||||
|
||||
@@ -87,13 +86,13 @@ func (s *statsReporter) reportLoop(ctx context.Context, dest statsDest) error {
|
||||
// use a separate goroutine to monitor the context so that we notice immediately, rather than
|
||||
// waiting for the next callback (which might never come if we are closing!)
|
||||
ctxDone := false
|
||||
agentutil.Go(ctx, s.logger, func() {
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
s.L.Lock()
|
||||
defer s.L.Unlock()
|
||||
ctxDone = true
|
||||
s.Broadcast()
|
||||
})
|
||||
}()
|
||||
defer s.logger.Debug(ctx, "reportLoop exiting")
|
||||
|
||||
s.L.Lock()
|
||||
|
||||
+44
-16
@@ -9,6 +9,7 @@ import (
|
||||
"net/http/pprof"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
@@ -130,6 +131,7 @@ func workspaceAgent() *serpent.Command {
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
logger = logger.Named("reaper")
|
||||
|
||||
logger.Info(ctx, "spawning reaper process")
|
||||
// Do not start a reaper on the child process. It's important
|
||||
@@ -139,31 +141,19 @@ func workspaceAgent() *serpent.Command {
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs(args...),
|
||||
reaper.WithCatchSignals(StopSignals...),
|
||||
reaper.WithLogger(logger),
|
||||
)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "agent process reaper unable to fork", slog.Error(err))
|
||||
return xerrors.Errorf("fork reap: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(ctx, "reaper child process exited", slog.F("exit_code", exitCode))
|
||||
logger.Info(ctx, "child process exited, propagating exit code",
|
||||
slog.F("exit_code", exitCode),
|
||||
)
|
||||
return ExitError(exitCode, nil)
|
||||
}
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
// note that calling stopNotify disables the signal handler
|
||||
// and the next interrupt will terminate the program (you
|
||||
// probably want cancel instead).
|
||||
//
|
||||
// Note that we don't want to handle these signals in the
|
||||
// process that runs as PID 1, that's why we do this after
|
||||
// the reaper forked.
|
||||
ctx, stopNotify := inv.SignalNotifyContext(ctx, StopSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
// reaper.
|
||||
go DumpHandler(ctx, "agent")
|
||||
|
||||
logWriter := &clilog.LumberjackWriteCloseFixer{Writer: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent.log"),
|
||||
MaxSize: 5, // MB
|
||||
@@ -176,6 +166,21 @@ func workspaceAgent() *serpent.Command {
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
// note that calling stopNotify disables the signal handler
|
||||
// and the next interrupt will terminate the program (you
|
||||
// probably want cancel instead).
|
||||
//
|
||||
// Note that we also handle these signals in the
|
||||
// process that runs as PID 1, mainly to forward it to the agent child
|
||||
// so that it can shutdown gracefully.
|
||||
ctx, stopNotify := logSignalNotifyContext(ctx, logger, StopSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
// reaper.
|
||||
go DumpHandler(ctx, "agent")
|
||||
|
||||
version := buildinfo.Version()
|
||||
logger.Info(ctx, "agent is starting now",
|
||||
slog.F("url", agentAuth.agentURL),
|
||||
@@ -565,3 +570,26 @@ func urlPort(u string) (int, error) {
|
||||
}
|
||||
return -1, xerrors.Errorf("invalid port: %s", u)
|
||||
}
|
||||
|
||||
// logSignalNotifyContext is like signal.NotifyContext but logs the received
|
||||
// signal before canceling the context.
|
||||
func logSignalNotifyContext(parent context.Context, logger slog.Logger, signals ...os.Signal) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancelCause(parent)
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, signals...)
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case sig := <-c:
|
||||
logger.Info(ctx, "agent received signal", slog.F("signal", sig.String()))
|
||||
cancel(xerrors.Errorf("signal: %s", sig.String()))
|
||||
case <-ctx.Done():
|
||||
logger.Info(ctx, "ctx canceled, stopping signal handler")
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, func() {
|
||||
cancel(context.Canceled)
|
||||
signal.Stop(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package cliutil
|
||||
package hostname
|
||||
|
||||
import (
|
||||
"os"
|
||||
@@ -23,7 +23,9 @@ func (r *RootCmd) organizations() *serpent.Command {
|
||||
},
|
||||
Children: []*serpent.Command{
|
||||
r.showOrganization(orgContext),
|
||||
r.listOrganizations(),
|
||||
r.createOrganization(),
|
||||
r.deleteOrganization(orgContext),
|
||||
r.organizationMembers(orgContext),
|
||||
r.organizationRoles(orgContext),
|
||||
r.organizationSettings(orgContext),
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -12,8 +15,10 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/pretty"
|
||||
)
|
||||
|
||||
func TestCurrentOrganization(t *testing.T) {
|
||||
@@ -54,6 +59,166 @@ func TestCurrentOrganization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestOrganizationList(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
orgID := uuid.New()
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations":
|
||||
_ = json.NewEncoder(w).Encode([]codersdk.Organization{
|
||||
{
|
||||
MinimalOrganization: codersdk.MinimalOrganization{
|
||||
ID: orgID,
|
||||
Name: "my-org",
|
||||
DisplayName: "My Org",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
})
|
||||
default:
|
||||
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := codersdk.New(must(url.Parse(server.URL)))
|
||||
inv, root := clitest.New(t, "organizations", "list")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
|
||||
require.NoError(t, inv.Run())
|
||||
require.Contains(t, buf.String(), "my-org")
|
||||
require.Contains(t, buf.String(), "My Org")
|
||||
require.Contains(t, buf.String(), orgID.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOrganizationDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Yes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
orgID := uuid.New()
|
||||
var deleteCalled atomic.Bool
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/my-org":
|
||||
_ = json.NewEncoder(w).Encode(codersdk.Organization{
|
||||
MinimalOrganization: codersdk.MinimalOrganization{
|
||||
ID: orgID,
|
||||
Name: "my-org",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == fmt.Sprintf("/api/v2/organizations/%s", orgID.String()):
|
||||
deleteCalled.Store(true)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := codersdk.New(must(url.Parse(server.URL)))
|
||||
inv, root := clitest.New(t, "organizations", "delete", "my-org", "--yes")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
require.NoError(t, inv.Run())
|
||||
require.True(t, deleteCalled.Load(), "expected delete request")
|
||||
})
|
||||
|
||||
t.Run("Prompted", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
orgID := uuid.New()
|
||||
var deleteCalled atomic.Bool
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/my-org":
|
||||
_ = json.NewEncoder(w).Encode(codersdk.Organization{
|
||||
MinimalOrganization: codersdk.MinimalOrganization{
|
||||
ID: orgID,
|
||||
Name: "my-org",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
})
|
||||
case r.Method == http.MethodDelete && r.URL.Path == fmt.Sprintf("/api/v2/organizations/%s", orgID.String()):
|
||||
deleteCalled.Store(true)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := codersdk.New(must(url.Parse(server.URL)))
|
||||
inv, root := clitest.New(t, "organizations", "delete", "my-org")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
execDone := make(chan error)
|
||||
go func() {
|
||||
execDone <- inv.Run()
|
||||
}()
|
||||
|
||||
pty.ExpectMatch(fmt.Sprintf("Delete organization %s?", pretty.Sprint(cliui.DefaultStyles.Code, "my-org")))
|
||||
pty.WriteLine("yes")
|
||||
|
||||
require.NoError(t, <-execDone)
|
||||
require.True(t, deleteCalled.Load(), "expected delete request")
|
||||
})
|
||||
|
||||
t.Run("Default", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
orgID := uuid.New()
|
||||
var deleteCalled atomic.Bool
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations/default":
|
||||
_ = json.NewEncoder(w).Encode(codersdk.Organization{
|
||||
MinimalOrganization: codersdk.MinimalOrganization{
|
||||
ID: orgID,
|
||||
Name: "default",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
IsDefault: true,
|
||||
})
|
||||
case r.Method == http.MethodDelete:
|
||||
deleteCalled.Store(true)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := codersdk.New(must(url.Parse(server.URL)))
|
||||
inv, root := clitest.New(t, "organizations", "delete", "default", "--yes")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
err := inv.Run()
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "default organization")
|
||||
require.False(t, deleteCalled.Load(), "expected no delete request")
|
||||
})
|
||||
}
|
||||
|
||||
func must[V any](v V, err error) V {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -0,0 +1,65 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/pretty"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) deleteOrganization(_ *OrganizationContext) *serpent.Command {
|
||||
cmd := &serpent.Command{
|
||||
Use: "delete <organization_name_or_id>",
|
||||
Short: "Delete an organization",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
Options: serpent.OptionSet{
|
||||
cliui.SkipPromptOption(),
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
orgArg := inv.Args[0]
|
||||
organization, err := client.OrganizationByName(inv.Context(), orgArg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if organization.IsDefault {
|
||||
return xerrors.Errorf("cannot delete the default organization %q", organization.Name)
|
||||
}
|
||||
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Delete organization %s?", pretty.Sprint(cliui.DefaultStyles.Code, organization.Name)),
|
||||
IsConfirm: true,
|
||||
Default: cliui.ConfirmNo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.DeleteOrganization(inv.Context(), organization.ID.String())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("delete organization %q: %w", organization.Name, err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(
|
||||
inv.Stdout,
|
||||
"Deleted organization %s at %s\n",
|
||||
pretty.Sprint(cliui.DefaultStyles.Keyword, organization.Name),
|
||||
cliui.Timestamp(time.Now()),
|
||||
)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) listOrganizations() *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]codersdk.Organization{}, []string{"name", "display name", "id", "default"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "list",
|
||||
Short: "List all organizations",
|
||||
Long: "List all organizations. Requires a role which grants ResourceOrganization: read.",
|
||||
Aliases: []string{"ls"},
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(0),
|
||||
),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
organizations, err := client.Organizations(inv.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := formatter.Format(inv.Context(), organizations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if out == "" {
|
||||
cliui.Infof(inv.Stderr, "No organizations found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(inv.Stdout, out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
+3
-1
@@ -123,7 +123,9 @@ func (r *RootCmd) ping() *serpent.Command {
|
||||
spin.Start()
|
||||
}
|
||||
|
||||
opts := &workspacesdk.DialAgentOptions{}
|
||||
opts := &workspacesdk.DialAgentOptions{
|
||||
ShortDescription: "CLI ping",
|
||||
}
|
||||
|
||||
if r.verbose {
|
||||
opts.Logger = inv.Logger.AppendSinks(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
|
||||
|
||||
+3
-1
@@ -107,7 +107,9 @@ func (r *RootCmd) portForward() *serpent.Command {
|
||||
return xerrors.Errorf("await agent: %w", err)
|
||||
}
|
||||
|
||||
opts := &workspacesdk.DialAgentOptions{}
|
||||
opts := &workspacesdk.DialAgentOptions{
|
||||
ShortDescription: "CLI port-forward",
|
||||
}
|
||||
|
||||
logger := inv.Logger
|
||||
if r.verbose {
|
||||
|
||||
+3
-3
@@ -59,7 +59,7 @@ import (
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/cli/clilog"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/cliutil"
|
||||
"github.com/coder/coder/v2/cli/cliutil/hostname"
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/coderd"
|
||||
"github.com/coder/coder/v2/coderd/autobuild"
|
||||
@@ -1029,7 +1029,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
suffix := fmt.Sprintf("%d", i)
|
||||
// The suffix is added to the hostname, so we may need to trim to fit into
|
||||
// the 64 character limit.
|
||||
hostname := stringutil.Truncate(cliutil.Hostname(), 63-len(suffix))
|
||||
hostname := stringutil.Truncate(hostname.Hostname(), 63-len(suffix))
|
||||
name := fmt.Sprintf("%s-%s", hostname, suffix)
|
||||
daemonCacheDir := filepath.Join(cacheDir, fmt.Sprintf("provisioner-%d", i))
|
||||
daemon, err := newProvisionerDaemon(
|
||||
@@ -2174,7 +2174,7 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
|
||||
// existing database
|
||||
retryPortDiscovery := errors.Is(err, os.ErrNotExist) && testing.Testing()
|
||||
if retryPortDiscovery {
|
||||
maxAttempts = 3
|
||||
maxAttempts = 10
|
||||
}
|
||||
|
||||
var startErr error
|
||||
|
||||
+3
-1
@@ -97,7 +97,9 @@ func (r *RootCmd) speedtest() *serpent.Command {
|
||||
return xerrors.Errorf("await agent: %w", err)
|
||||
}
|
||||
|
||||
opts := &workspacesdk.DialAgentOptions{}
|
||||
opts := &workspacesdk.DialAgentOptions{
|
||||
ShortDescription: "CLI speedtest",
|
||||
}
|
||||
if r.verbose {
|
||||
opts.Logger = inv.Logger.AppendSinks(sloghuman.Sink(inv.Stderr)).Leveled(slog.LevelDebug)
|
||||
}
|
||||
|
||||
+8
-3
@@ -365,6 +365,10 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
}
|
||||
return err
|
||||
}
|
||||
shortDescription := "CLI ssh"
|
||||
if stdio {
|
||||
shortDescription = "CLI ssh (stdio)"
|
||||
}
|
||||
|
||||
// If we're in stdio mode, check to see if we can use Coder Connect.
|
||||
// We don't support Coder Connect over non-stdio coder ssh yet.
|
||||
@@ -405,9 +409,10 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
}
|
||||
conn, err := wsClient.
|
||||
DialAgent(ctx, workspaceAgent.ID, &workspacesdk.DialAgentOptions{
|
||||
Logger: logger,
|
||||
BlockEndpoints: r.disableDirect,
|
||||
EnableTelemetry: !r.disableNetworkTelemetry,
|
||||
Logger: logger,
|
||||
BlockEndpoints: r.disableDirect,
|
||||
EnableTelemetry: !r.disableNetworkTelemetry,
|
||||
ShortDescription: shortDescription,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("dial agent: %w", err)
|
||||
|
||||
@@ -418,6 +418,7 @@ func writeBundle(src *support.Bundle, dest *zip.Writer) error {
|
||||
"workspace/template_version.json": src.Workspace.TemplateVersion,
|
||||
"workspace/parameters.json": src.Workspace.Parameters,
|
||||
"workspace/workspace.json": src.Workspace.Workspace,
|
||||
"workspace/workspace_sessions.json": src.Workspace.WorkspaceSessions,
|
||||
} {
|
||||
f, err := dest.Create(k)
|
||||
if err != nil {
|
||||
|
||||
+28
-19
@@ -39,15 +39,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("ByTaskName_JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client // user already has access to their own workspace
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -62,15 +63,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("ByTaskID_JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String(), "--output", "json")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -85,15 +87,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("ByTaskID_Table", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String())
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -139,29 +142,31 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("ErrorFetchingLogs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String())
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, assert.AnError.Error())
|
||||
})
|
||||
|
||||
t.Run("SnapshotWithLogs_Table", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPaused, testMessages)
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPaused, testMessages)
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name)
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -171,15 +176,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("SnapshotWithLogs_JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPaused, testMessages)
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPaused, testMessages)
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -194,7 +200,6 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("SnapshotWithoutLogs_NoSnapshotCaptured", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTestWithoutSnapshot(t, codersdk.TaskStatusPaused)
|
||||
userClient := client
|
||||
@@ -203,6 +208,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -212,7 +218,6 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("SnapshotWithSingleMessage", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
singleMessage := []agentapisdk.Message{
|
||||
{
|
||||
@@ -223,13 +228,15 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusPending, singleMessage)
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusPending, singleMessage)
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name)
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -239,15 +246,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("SnapshotEmptyLogs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusInitializing, []agentapisdk.Message{})
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusInitializing, []agentapisdk.Message{})
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name)
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -257,15 +265,16 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
|
||||
t.Run("InitializingTaskSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTestWithSnapshot(ctx, t, codersdk.TaskStatusInitializing, testMessages)
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTestWithSnapshot(setupCtx, t, codersdk.TaskStatusInitializing, testMessages)
|
||||
userClient := client
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name)
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
+12
-8
@@ -23,9 +23,9 @@ func Test_TaskSend(t *testing.T) {
|
||||
|
||||
t.Run("ByTaskName_WithArgument", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
@@ -33,15 +33,16 @@ func Test_TaskSend(t *testing.T) {
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("ByTaskID_WithArgument", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
@@ -49,15 +50,16 @@ func Test_TaskSend(t *testing.T) {
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("ByTaskName_WithStdin", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
client, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
|
||||
var stdout strings.Builder
|
||||
@@ -66,6 +68,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
inv.Stdin = strings.NewReader("carry on with the task")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
@@ -108,15 +111,16 @@ func Test_TaskSend(t *testing.T) {
|
||||
|
||||
t.Run("SendError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userClient, task := setupCLITaskTest(ctx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "task", "send", task.Name, "some task input")
|
||||
inv.Stdout = &stdout
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, assert.AnError.Error())
|
||||
})
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
err: WARN: Task is initializing. Showing last 1 message from snapshot.
|
||||
err: WARN: Task is pending. Showing last 1 message from snapshot.
|
||||
err:
|
||||
out: TYPE CONTENT
|
||||
out: input Single message
|
||||
|
||||
@@ -9,6 +9,8 @@ USAGE:
|
||||
|
||||
SUBCOMMANDS:
|
||||
create Create a new organization.
|
||||
delete Delete an organization
|
||||
list List all organizations
|
||||
members Manage organization members
|
||||
roles Manage organization roles.
|
||||
settings Manage organization settings.
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder organizations delete [flags] <organization_name_or_id>
|
||||
|
||||
Delete an organization
|
||||
|
||||
Aliases: rm
|
||||
|
||||
OPTIONS:
|
||||
-y, --yes bool
|
||||
Bypass confirmation prompts.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
@@ -0,0 +1,21 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder organizations list [flags]
|
||||
|
||||
List all organizations
|
||||
|
||||
Aliases: ls
|
||||
|
||||
List all organizations. Requires a role which grants ResourceOrganization:
|
||||
read.
|
||||
|
||||
OPTIONS:
|
||||
-c, --column [id|name|display name|icon|description|created at|updated at|default] (default: name,display name,id,default)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
+3
-2
@@ -166,8 +166,9 @@ func (r *RootCmd) vscodeSSH() *serpent.Command {
|
||||
}
|
||||
agentConn, err := workspacesdk.New(client).
|
||||
DialAgent(ctx, workspaceAgent.ID, &workspacesdk.DialAgentOptions{
|
||||
Logger: logger,
|
||||
BlockEndpoints: r.disableDirect,
|
||||
Logger: logger,
|
||||
BlockEndpoints: r.disableDirect,
|
||||
ShortDescription: "VSCode SSH",
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("dial workspace agent: %w", err)
|
||||
|
||||
@@ -89,6 +89,7 @@ type Options struct {
|
||||
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
|
||||
NetworkTelemetryHandler func(batch []*tailnetproto.TelemetryEvent)
|
||||
BoundaryUsageTracker *boundaryusage.Tracker
|
||||
LifecycleMetrics *LifecycleMetrics
|
||||
|
||||
AccessURL *url.URL
|
||||
AppHostname string
|
||||
@@ -170,6 +171,7 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
Database: opts.Database,
|
||||
Log: opts.Log,
|
||||
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
|
||||
Metrics: opts.LifecycleMetrics,
|
||||
}
|
||||
|
||||
api.AppsAPI = &AppsAPI{
|
||||
@@ -200,11 +202,13 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
}
|
||||
|
||||
api.ConnLogAPI = &ConnLogAPI{
|
||||
AgentFn: api.agent,
|
||||
ConnectionLogger: opts.ConnectionLogger,
|
||||
Database: opts.Database,
|
||||
Workspace: api.cachedWorkspaceFields,
|
||||
Log: opts.Log,
|
||||
AgentFn: api.agent,
|
||||
ConnectionLogger: opts.ConnectionLogger,
|
||||
TailnetCoordinator: opts.TailnetCoordinator,
|
||||
Database: opts.Database,
|
||||
Workspace: api.cachedWorkspaceFields,
|
||||
Log: opts.Log,
|
||||
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
|
||||
}
|
||||
|
||||
api.DRPCService = &tailnet.DRPCService{
|
||||
|
||||
@@ -3,6 +3,8 @@ package agentapi
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -15,14 +17,18 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/wspubsub"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
)
|
||||
|
||||
type ConnLogAPI struct {
|
||||
AgentFn func(context.Context) (database.WorkspaceAgent, error)
|
||||
ConnectionLogger *atomic.Pointer[connectionlog.ConnectionLogger]
|
||||
Workspace *CachedWorkspaceFields
|
||||
Database database.Store
|
||||
Log slog.Logger
|
||||
AgentFn func(context.Context) (database.WorkspaceAgent, error)
|
||||
ConnectionLogger *atomic.Pointer[connectionlog.ConnectionLogger]
|
||||
TailnetCoordinator *atomic.Pointer[tailnet.Coordinator]
|
||||
Workspace *CachedWorkspaceFields
|
||||
Database database.Store
|
||||
Log slog.Logger
|
||||
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent, wspubsub.WorkspaceEventKind) error
|
||||
}
|
||||
|
||||
func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.ReportConnectionRequest) (*emptypb.Empty, error) {
|
||||
@@ -88,6 +94,38 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
|
||||
}
|
||||
logIP := database.ParseIP(logIPRaw) // will return null if invalid
|
||||
|
||||
// At connect time, look up the tailnet peer to capture the
|
||||
// client hostname and description for session grouping later.
|
||||
var clientHostname, shortDescription, clientOS sql.NullString
|
||||
if action == database.ConnectionStatusConnected && a.TailnetCoordinator != nil {
|
||||
if coord := a.TailnetCoordinator.Load(); coord != nil {
|
||||
for _, peer := range (*coord).TunnelPeers(workspaceAgent.ID) {
|
||||
if peer.Node != nil {
|
||||
// Match peer by checking if any of its addresses
|
||||
// match the connection IP.
|
||||
for _, addr := range peer.Node.Addresses {
|
||||
prefix, err := netip.ParsePrefix(addr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if logIP.Valid && prefix.Addr().String() == logIP.IPNet.IP.String() {
|
||||
if peer.Node.Hostname != "" {
|
||||
clientHostname = sql.NullString{String: peer.Node.Hostname, Valid: true}
|
||||
}
|
||||
if peer.Node.ShortDescription != "" {
|
||||
shortDescription = sql.NullString{String: peer.Node.ShortDescription, Valid: true}
|
||||
}
|
||||
if peer.Node.Os != "" {
|
||||
clientOS = sql.NullString{String: peer.Node.Os, Valid: true}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reason := req.GetConnection().GetReason()
|
||||
connLogger := *a.ConnectionLogger.Load()
|
||||
err = connLogger.Upsert(ctx, database.UpsertConnectionLogParams{
|
||||
@@ -98,6 +136,7 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: workspaceAgent.Name,
|
||||
AgentID: uuid.NullUUID{UUID: workspaceAgent.ID, Valid: true},
|
||||
Type: connectionType,
|
||||
Code: code,
|
||||
Ip: logIP,
|
||||
@@ -109,6 +148,7 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
|
||||
String: reason,
|
||||
Valid: reason != "",
|
||||
},
|
||||
SessionID: uuid.NullUUID{},
|
||||
// We supply the action:
|
||||
// - So the DB can handle duplicate connections or disconnections properly.
|
||||
// - To make it clear whether this is a connection or disconnection
|
||||
@@ -121,13 +161,101 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
|
||||
Valid: false,
|
||||
},
|
||||
// N/A
|
||||
UserAgent: sql.NullString{},
|
||||
// N/A
|
||||
SlugOrPort: sql.NullString{},
|
||||
UserAgent: sql.NullString{},
|
||||
ClientHostname: clientHostname,
|
||||
ShortDescription: shortDescription,
|
||||
Os: clientOS,
|
||||
SlugOrPort: sql.NullString{
|
||||
String: req.GetConnection().GetSlugOrPort(),
|
||||
Valid: req.GetConnection().GetSlugOrPort() != "",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("export connection log: %w", err)
|
||||
}
|
||||
|
||||
// At disconnect time, find or create a session for this connection.
|
||||
// This groups related connection logs into workspace sessions.
|
||||
if action == database.ConnectionStatusDisconnected {
|
||||
a.assignSessionForDisconnect(ctx, connectionID, ws, workspaceAgent, req)
|
||||
}
|
||||
|
||||
if a.PublishWorkspaceUpdateFn != nil {
|
||||
if err := a.PublishWorkspaceUpdateFn(ctx, &workspaceAgent, wspubsub.WorkspaceEventKindConnectionLogUpdate); err != nil {
|
||||
a.Log.Warn(ctx, "failed to publish connection log update", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// assignSessionForDisconnect looks up the existing connection log for this
|
||||
// connection ID and finds or creates a session to group it with.
|
||||
func (a *ConnLogAPI) assignSessionForDisconnect(
|
||||
ctx context.Context,
|
||||
connectionID uuid.UUID,
|
||||
ws database.WorkspaceIdentity,
|
||||
workspaceAgent database.WorkspaceAgent,
|
||||
req *agentproto.ReportConnectionRequest,
|
||||
) {
|
||||
//nolint:gocritic // The agent context doesn't have connection_log
|
||||
// permissions. Session creation is authorized by the workspace
|
||||
// access already validated in ReportConnection.
|
||||
ctx = dbauthz.AsConnectionLogger(ctx)
|
||||
|
||||
existingLog, err := a.Database.GetConnectionLogByConnectionID(ctx, database.GetConnectionLogByConnectionIDParams{
|
||||
ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
AgentName: workspaceAgent.Name,
|
||||
})
|
||||
if err != nil {
|
||||
a.Log.Warn(ctx, "failed to look up connection log for session assignment",
|
||||
slog.Error(err),
|
||||
slog.F("connection_id", connectionID),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
sessionIDRaw, err := a.Database.FindOrCreateSessionForDisconnect(ctx, database.FindOrCreateSessionForDisconnectParams{
|
||||
WorkspaceID: ws.ID.String(),
|
||||
Ip: existingLog.Ip,
|
||||
ClientHostname: existingLog.ClientHostname,
|
||||
ShortDescription: existingLog.ShortDescription,
|
||||
ConnectTime: existingLog.ConnectTime,
|
||||
DisconnectTime: req.GetConnection().GetTimestamp().AsTime(),
|
||||
AgentID: uuid.NullUUID{UUID: workspaceAgent.ID, Valid: true},
|
||||
})
|
||||
if err != nil {
|
||||
a.Log.Warn(ctx, "failed to find or create session for disconnect",
|
||||
slog.Error(err),
|
||||
slog.F("connection_id", connectionID),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// The query uses COALESCE which returns a generic type. The
|
||||
// database/sql driver may return the UUID as a string, []byte,
|
||||
// or [16]byte rather than uuid.UUID, so we parse it.
|
||||
sessionID, parseErr := uuid.Parse(fmt.Sprintf("%s", sessionIDRaw))
|
||||
if parseErr != nil {
|
||||
a.Log.Warn(ctx, "failed to parse session ID from FindOrCreateSessionForDisconnect",
|
||||
slog.Error(parseErr),
|
||||
slog.F("connection_id", connectionID),
|
||||
slog.F("session_id_raw", sessionIDRaw),
|
||||
slog.F("session_id_type", fmt.Sprintf("%T", sessionIDRaw)),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Link the connection log to its session so that
|
||||
// CloseConnectionLogsAndCreateSessions skips it.
|
||||
if err := a.Database.UpdateConnectionLogSessionID(ctx, database.UpdateConnectionLogSessionIDParams{
|
||||
ID: existingLog.ID,
|
||||
SessionID: uuid.NullUUID{UUID: sessionID, Valid: true},
|
||||
}); err != nil {
|
||||
a.Log.Warn(ctx, "failed to update connection log session ID",
|
||||
slog.Error(err),
|
||||
slog.F("connection_id", connectionID),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/wspubsub"
|
||||
)
|
||||
|
||||
func TestConnectionLog(t *testing.T) {
|
||||
@@ -41,14 +42,15 @@ func TestConnectionLog(t *testing.T) {
|
||||
)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
id uuid.UUID
|
||||
action *agentproto.Connection_Action
|
||||
typ *agentproto.Connection_Type
|
||||
time time.Time
|
||||
ip string
|
||||
status int32
|
||||
reason string
|
||||
name string
|
||||
id uuid.UUID
|
||||
action *agentproto.Connection_Action
|
||||
typ *agentproto.Connection_Type
|
||||
time time.Time
|
||||
ip string
|
||||
status int32
|
||||
reason string
|
||||
slugOrPort string
|
||||
}{
|
||||
{
|
||||
name: "SSH Connect",
|
||||
@@ -84,6 +86,34 @@ func TestConnectionLog(t *testing.T) {
|
||||
typ: agentproto.Connection_RECONNECTING_PTY.Enum(),
|
||||
time: dbtime.Now(),
|
||||
},
|
||||
{
|
||||
name: "Port Forwarding Connect",
|
||||
id: uuid.New(),
|
||||
action: agentproto.Connection_CONNECT.Enum(),
|
||||
typ: agentproto.Connection_PORT_FORWARDING.Enum(),
|
||||
time: dbtime.Now(),
|
||||
ip: "192.168.1.1",
|
||||
slugOrPort: "8080",
|
||||
},
|
||||
{
|
||||
name: "Port Forwarding Disconnect",
|
||||
id: uuid.New(),
|
||||
action: agentproto.Connection_DISCONNECT.Enum(),
|
||||
typ: agentproto.Connection_PORT_FORWARDING.Enum(),
|
||||
time: dbtime.Now(),
|
||||
ip: "192.168.1.1",
|
||||
status: 200,
|
||||
slugOrPort: "8080",
|
||||
},
|
||||
{
|
||||
name: "Workspace App Connect",
|
||||
id: uuid.New(),
|
||||
action: agentproto.Connection_CONNECT.Enum(),
|
||||
typ: agentproto.Connection_WORKSPACE_APP.Enum(),
|
||||
time: dbtime.Now(),
|
||||
ip: "10.0.0.1",
|
||||
slugOrPort: "my-app",
|
||||
},
|
||||
{
|
||||
name: "SSH Disconnect",
|
||||
id: uuid.New(),
|
||||
@@ -110,6 +140,10 @@ func TestConnectionLog(t *testing.T) {
|
||||
|
||||
mDB := dbmock.NewMockStore(gomock.NewController(t))
|
||||
mDB.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(workspace, nil)
|
||||
// Disconnect actions trigger session assignment which calls
|
||||
// GetConnectionLogByConnectionID and FindOrCreateSessionForDisconnect.
|
||||
mDB.EXPECT().GetConnectionLogByConnectionID(gomock.Any(), gomock.Any()).Return(database.ConnectionLog{}, nil).AnyTimes()
|
||||
mDB.EXPECT().FindOrCreateSessionForDisconnect(gomock.Any(), gomock.Any()).Return(database.WorkspaceSession{}, nil).AnyTimes()
|
||||
|
||||
api := &agentapi.ConnLogAPI{
|
||||
ConnectionLogger: asAtomicPointer[connectionlog.ConnectionLogger](connLogger),
|
||||
@@ -128,6 +162,7 @@ func TestConnectionLog(t *testing.T) {
|
||||
Ip: tt.ip,
|
||||
StatusCode: tt.status,
|
||||
Reason: &tt.reason,
|
||||
SlugOrPort: &tt.slugOrPort,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -144,6 +179,7 @@ func TestConnectionLog(t *testing.T) {
|
||||
WorkspaceID: workspace.ID,
|
||||
WorkspaceName: workspace.Name,
|
||||
AgentName: agent.Name,
|
||||
AgentID: uuid.NullUUID{UUID: agent.ID, Valid: true},
|
||||
UserID: uuid.NullUUID{
|
||||
UUID: uuid.Nil,
|
||||
Valid: false,
|
||||
@@ -164,11 +200,72 @@ func TestConnectionLog(t *testing.T) {
|
||||
UUID: tt.id,
|
||||
Valid: tt.id != uuid.Nil,
|
||||
},
|
||||
SlugOrPort: sql.NullString{
|
||||
String: tt.slugOrPort,
|
||||
Valid: tt.slugOrPort != "",
|
||||
},
|
||||
}))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectionLogPublishesWorkspaceUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
owner = database.User{ID: uuid.New(), Username: "cool-user"}
|
||||
workspace = database.Workspace{
|
||||
ID: uuid.New(),
|
||||
OrganizationID: uuid.New(),
|
||||
OwnerID: owner.ID,
|
||||
Name: "cool-workspace",
|
||||
}
|
||||
agent = database.WorkspaceAgent{ID: uuid.New()}
|
||||
)
|
||||
|
||||
connLogger := connectionlog.NewFake()
|
||||
|
||||
mDB := dbmock.NewMockStore(gomock.NewController(t))
|
||||
mDB.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(workspace, nil)
|
||||
|
||||
var (
|
||||
called int
|
||||
gotKind wspubsub.WorkspaceEventKind
|
||||
gotAgent uuid.UUID
|
||||
)
|
||||
|
||||
api := &agentapi.ConnLogAPI{
|
||||
ConnectionLogger: asAtomicPointer[connectionlog.ConnectionLogger](connLogger),
|
||||
Database: mDB,
|
||||
AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
},
|
||||
Workspace: &agentapi.CachedWorkspaceFields{},
|
||||
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
called++
|
||||
gotKind = kind
|
||||
gotAgent = agent.ID
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
_, err := api.ReportConnection(context.Background(), &agentproto.ReportConnectionRequest{
|
||||
Connection: &agentproto.Connection{
|
||||
Id: id[:],
|
||||
Action: agentproto.Connection_CONNECT,
|
||||
Type: agentproto.Connection_SSH,
|
||||
Timestamp: timestamppb.New(dbtime.Now()),
|
||||
Ip: "127.0.0.1",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, called)
|
||||
require.Equal(t, wspubsub.WorkspaceEventKindConnectionLogUpdate, gotKind)
|
||||
require.Equal(t, agent.ID, gotAgent)
|
||||
}
|
||||
|
||||
func agentProtoConnectionTypeToConnectionLog(t *testing.T, typ agentproto.Connection_Type) database.ConnectionType {
|
||||
a, err := db2sdk.ConnectionLogConnectionTypeFromAgentProtoConnectionType(typ)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -31,7 +32,9 @@ type LifecycleAPI struct {
|
||||
Log slog.Logger
|
||||
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent, wspubsub.WorkspaceEventKind) error
|
||||
|
||||
TimeNowFn func() time.Time // defaults to dbtime.Now()
|
||||
TimeNowFn func() time.Time // defaults to dbtime.Now()
|
||||
Metrics *LifecycleMetrics
|
||||
emitMetricsOnce sync.Once
|
||||
}
|
||||
|
||||
func (a *LifecycleAPI) now() time.Time {
|
||||
@@ -125,6 +128,17 @@ func (a *LifecycleAPI) UpdateLifecycle(ctx context.Context, req *agentproto.Upda
|
||||
}
|
||||
}
|
||||
|
||||
// Emit build duration metric when agent transitions to a terminal startup state.
|
||||
// We only emit once per agent connection to avoid duplicate metrics.
|
||||
switch lifecycleState {
|
||||
case database.WorkspaceAgentLifecycleStateReady,
|
||||
database.WorkspaceAgentLifecycleStateStartTimeout,
|
||||
database.WorkspaceAgentLifecycleStateStartError:
|
||||
a.emitMetricsOnce.Do(func() {
|
||||
a.emitBuildDurationMetric(ctx, workspaceAgent.ResourceID)
|
||||
})
|
||||
}
|
||||
|
||||
return req.Lifecycle, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,12 +9,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
@@ -22,6 +24,10 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// fullMetricName is the fully-qualified Prometheus metric name
|
||||
// (namespace + name) used for gathering in tests.
|
||||
const fullMetricName = "coderd_" + agentapi.BuildDurationMetricName
|
||||
|
||||
func TestUpdateLifecycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -30,6 +36,12 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
someTime = dbtime.Time(someTime)
|
||||
now := dbtime.Now()
|
||||
|
||||
// Fixed times for build duration metric assertions.
|
||||
// The expected duration is exactly 90 seconds.
|
||||
buildCreatedAt := dbtime.Time(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC))
|
||||
agentReadyAt := dbtime.Time(time.Date(2025, 1, 1, 0, 1, 30, 0, time.UTC))
|
||||
expectedDuration := agentReadyAt.Sub(buildCreatedAt).Seconds() // 90.0
|
||||
|
||||
var (
|
||||
workspaceID = uuid.New()
|
||||
agentCreated = database.WorkspaceAgent{
|
||||
@@ -105,6 +117,19 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
}).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
@@ -113,6 +138,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
// Test that nil publish fn works.
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
@@ -122,6 +148,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
// This test jumps from CREATING to READY, skipping STARTED. Both the
|
||||
@@ -147,8 +183,21 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
}).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
publishCalled := false
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentCreated, nil
|
||||
@@ -156,6 +205,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
publishCalled = true
|
||||
return nil
|
||||
@@ -168,6 +218,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
require.True(t, publishCalled)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
t.Run("NoTimeSpecified", func(t *testing.T) {
|
||||
@@ -194,6 +254,19 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
@@ -202,6 +275,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
@@ -213,6 +287,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
t.Run("AllStates", func(t *testing.T) {
|
||||
@@ -228,6 +312,9 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
|
||||
var publishCalled int64
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
@@ -235,6 +322,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
atomic.AddInt64(&publishCalled, 1)
|
||||
return nil
|
||||
@@ -277,6 +365,20 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
ReadyAt: expectedReadyAt,
|
||||
}).Times(1).Return(nil)
|
||||
|
||||
// The first ready state triggers the build duration metric query.
|
||||
if state == agentproto.Lifecycle_READY || state == agentproto.Lifecycle_START_TIMEOUT || state == agentproto.Lifecycle_START_ERROR {
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agent.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: someTime,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: stateNow,
|
||||
WorstStatus: "success",
|
||||
}, nil).MaxTimes(1)
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
@@ -322,6 +424,164 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
require.Nil(t, resp)
|
||||
require.False(t, publishCalled)
|
||||
})
|
||||
|
||||
// Test that metric is NOT emitted when not all agents are ready (multi-agent case).
|
||||
t.Run("MetricNotEmittedWhenNotAllAgentsReady", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
// Return AllAgentsReady = false to simulate multi-agent case where not all are ready.
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: someTime,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: false, // Not all agents ready yet
|
||||
LastAgentReadyAt: time.Time{}, // No ready time yet
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
require.Nil(t, promhelp.MetricValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
}), "metric should not be emitted when not all agents are ready")
|
||||
})
|
||||
|
||||
// Test that prebuild label is "true" when owner is prebuild system user.
|
||||
t.Run("PrebuildLabelTrue", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: true, // Prebuild workspace
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "true",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
// Test worst status is used when one agent has an error.
|
||||
t.Run("WorstStatusError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "error", // One agent had an error
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "error",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateStartup(t *testing.T) {
|
||||
|
||||
@@ -249,11 +249,17 @@ func dbAppToProto(dbApp database.WorkspaceApp, agent database.WorkspaceAgent, ow
|
||||
func dbAgentDevcontainersToProto(devcontainers []database.WorkspaceAgentDevcontainer) []*agentproto.WorkspaceAgentDevcontainer {
|
||||
ret := make([]*agentproto.WorkspaceAgentDevcontainer, len(devcontainers))
|
||||
for i, dc := range devcontainers {
|
||||
var subagentID []byte
|
||||
if dc.SubagentID.Valid {
|
||||
subagentID = dc.SubagentID.UUID[:]
|
||||
}
|
||||
|
||||
ret[i] = &agentproto.WorkspaceAgentDevcontainer{
|
||||
Id: dc.ID[:],
|
||||
Name: dc.Name,
|
||||
WorkspaceFolder: dc.WorkspaceFolder,
|
||||
ConfigPath: dc.ConfigPath,
|
||||
SubagentId: subagentID,
|
||||
}
|
||||
}
|
||||
return ret
|
||||
|
||||
@@ -0,0 +1,97 @@
|
||||
package agentapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
// BuildDurationMetricName is the short name for the end-to-end
|
||||
// workspace build duration histogram. The full metric name is
|
||||
// prefixed with the namespace "coderd_".
|
||||
const BuildDurationMetricName = "template_workspace_build_duration_seconds"
|
||||
|
||||
// LifecycleMetrics contains Prometheus metrics for the lifecycle API.
|
||||
type LifecycleMetrics struct {
|
||||
BuildDuration *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
// NewLifecycleMetrics creates and registers all lifecycle-related
|
||||
// Prometheus metrics.
|
||||
//
|
||||
// The build duration histogram tracks the end-to-end duration from
|
||||
// workspace build creation to agent ready, by template. It is
|
||||
// recorded by the coderd replica handling the agent's connection
|
||||
// when the last agent reports ready. In multi-replica deployments,
|
||||
// each replica only has observations for agents it handles.
|
||||
//
|
||||
// The "is_prebuild" label distinguishes prebuild creation (background,
|
||||
// no user waiting) from user-initiated builds (regular workspace
|
||||
// creation or prebuild claims).
|
||||
func NewLifecycleMetrics(reg prometheus.Registerer) *LifecycleMetrics {
|
||||
m := &LifecycleMetrics{
|
||||
BuildDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "coderd",
|
||||
Name: BuildDurationMetricName,
|
||||
Help: "Duration from workspace build creation to agent ready, by template.",
|
||||
Buckets: []float64{
|
||||
1, // 1s
|
||||
10,
|
||||
30,
|
||||
60, // 1min
|
||||
60 * 5,
|
||||
60 * 10,
|
||||
60 * 30, // 30min
|
||||
60 * 60, // 1hr
|
||||
},
|
||||
NativeHistogramBucketFactor: 1.1,
|
||||
NativeHistogramMaxBucketNumber: 100,
|
||||
NativeHistogramMinResetDuration: time.Hour,
|
||||
}, []string{"template_name", "organization_name", "transition", "status", "is_prebuild"}),
|
||||
}
|
||||
reg.MustRegister(m.BuildDuration)
|
||||
return m
|
||||
}
|
||||
|
||||
// emitBuildDurationMetric records the end-to-end workspace build
|
||||
// duration from build creation to when all agents are ready.
|
||||
func (a *LifecycleAPI) emitBuildDurationMetric(ctx context.Context, resourceID uuid.UUID) {
|
||||
if a.Metrics == nil {
|
||||
return
|
||||
}
|
||||
|
||||
buildInfo, err := a.Database.GetWorkspaceBuildMetricsByResourceID(ctx, resourceID)
|
||||
if err != nil {
|
||||
a.Log.Warn(ctx, "failed to get build info for metrics", slog.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
// Wait until all agents have reached a terminal startup state.
|
||||
if !buildInfo.AllAgentsReady {
|
||||
return
|
||||
}
|
||||
|
||||
// LastAgentReadyAt is the MAX(ready_at) across all agents. Since
|
||||
// we only get here when AllAgentsReady is true, this should always
|
||||
// be valid.
|
||||
if buildInfo.LastAgentReadyAt.IsZero() {
|
||||
a.Log.Warn(ctx, "last_agent_ready_at is unexpectedly zero",
|
||||
slog.F("last_agent_ready_at", buildInfo.LastAgentReadyAt))
|
||||
return
|
||||
}
|
||||
|
||||
duration := buildInfo.LastAgentReadyAt.Sub(buildInfo.CreatedAt).Seconds()
|
||||
|
||||
a.Metrics.BuildDuration.WithLabelValues(
|
||||
buildInfo.TemplateName,
|
||||
buildInfo.OrganizationName,
|
||||
string(buildInfo.Transition),
|
||||
buildInfo.WorstStatus,
|
||||
strconv.FormatBool(buildInfo.IsPrebuild),
|
||||
).Observe(duration)
|
||||
}
|
||||
+56
-19
@@ -37,25 +37,6 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
|
||||
//nolint:gocritic // This gives us only the permissions required to do the job.
|
||||
ctx = dbauthz.AsSubAgentAPI(ctx, a.OrganizationID, a.OwnerID)
|
||||
|
||||
parentAgent, err := a.AgentFn(ctx)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get parent agent: %w", err)
|
||||
}
|
||||
|
||||
agentName := req.Name
|
||||
if agentName == "" {
|
||||
return nil, codersdk.ValidationError{
|
||||
Field: "name",
|
||||
Detail: "agent name cannot be empty",
|
||||
}
|
||||
}
|
||||
if !provisioner.AgentNameRegex.MatchString(agentName) {
|
||||
return nil, codersdk.ValidationError{
|
||||
Field: "name",
|
||||
Detail: fmt.Sprintf("agent name %q does not match regex %q", agentName, provisioner.AgentNameRegex),
|
||||
}
|
||||
}
|
||||
|
||||
createdAt := a.Clock.Now()
|
||||
|
||||
displayApps := make([]database.DisplayApp, 0, len(req.DisplayApps))
|
||||
@@ -83,6 +64,62 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
|
||||
displayApps = append(displayApps, app)
|
||||
}
|
||||
|
||||
parentAgent, err := a.AgentFn(ctx)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get parent agent: %w", err)
|
||||
}
|
||||
|
||||
// An ID is only given in the request when it is a terraform-defined devcontainer
|
||||
// that has attached resources. These subagents are pre-provisioned by terraform
|
||||
// (the agent record already exists), so we update configurable fields like
|
||||
// display_apps rather than creating a new agent.
|
||||
if req.Id != nil {
|
||||
id, err := uuid.FromBytes(req.Id)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse agent id: %w", err)
|
||||
}
|
||||
|
||||
subAgent, err := a.Database.GetWorkspaceAgentByID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get workspace agent by id: %w", err)
|
||||
}
|
||||
|
||||
// Validate that the subagent belongs to the current parent agent to
|
||||
// prevent updating subagents from other agents within the same workspace.
|
||||
if !subAgent.ParentID.Valid || subAgent.ParentID.UUID != parentAgent.ID {
|
||||
return nil, xerrors.Errorf("subagent does not belong to this parent agent")
|
||||
}
|
||||
|
||||
if err := a.Database.UpdateWorkspaceAgentDisplayAppsByID(ctx, database.UpdateWorkspaceAgentDisplayAppsByIDParams{
|
||||
ID: id,
|
||||
DisplayApps: displayApps,
|
||||
UpdatedAt: createdAt,
|
||||
}); err != nil {
|
||||
return nil, xerrors.Errorf("update workspace agent display apps: %w", err)
|
||||
}
|
||||
|
||||
return &agentproto.CreateSubAgentResponse{
|
||||
Agent: &agentproto.SubAgent{
|
||||
Name: subAgent.Name,
|
||||
Id: subAgent.ID[:],
|
||||
AuthToken: subAgent.AuthToken[:],
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
agentName := req.Name
|
||||
if agentName == "" {
|
||||
return nil, codersdk.ValidationError{
|
||||
Field: "name",
|
||||
Detail: "agent name cannot be empty",
|
||||
}
|
||||
}
|
||||
if !provisioner.AgentNameRegex.MatchString(agentName) {
|
||||
return nil, codersdk.ValidationError{
|
||||
Field: "name",
|
||||
Detail: fmt.Sprintf("agent name %q does not match regex %q", agentName, provisioner.AgentNameRegex),
|
||||
}
|
||||
}
|
||||
subAgent, err := a.Database.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{
|
||||
ID: uuid.New(),
|
||||
ParentID: uuid.NullUUID{Valid: true, UUID: parentAgent.ID},
|
||||
|
||||
@@ -1132,6 +1132,225 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
require.Equal(t, "Custom App", apps[0].DisplayName)
|
||||
})
|
||||
|
||||
t.Run("CreateSubAgentUpdatesExisting", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
baseChildAgent := database.WorkspaceAgent{
|
||||
Name: "existing-child-agent",
|
||||
Directory: "/workspaces/test",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: []database.DisplayApp{database.DisplayAppVscode},
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
setup func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest
|
||||
wantErr string
|
||||
check func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent)
|
||||
}
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "OK",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// Given: An existing child agent with some display apps.
|
||||
childAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{Valid: true, UUID: agent.ID},
|
||||
ResourceID: agent.ResourceID,
|
||||
Name: baseChildAgent.Name,
|
||||
Directory: baseChildAgent.Directory,
|
||||
Architecture: baseChildAgent.Architecture,
|
||||
OperatingSystem: baseChildAgent.OperatingSystem,
|
||||
DisplayApps: baseChildAgent.DisplayApps,
|
||||
})
|
||||
|
||||
// When: We call CreateSubAgent with the existing agent's ID and new display apps.
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: childAgent.ID[:],
|
||||
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
|
||||
proto.CreateSubAgentRequest_WEB_TERMINAL,
|
||||
proto.CreateSubAgentRequest_SSH_HELPER,
|
||||
},
|
||||
}
|
||||
},
|
||||
check: func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent) {
|
||||
// Then: The response contains the existing agent's details.
|
||||
require.NotNil(t, resp.Agent)
|
||||
require.Equal(t, baseChildAgent.Name, resp.Agent.Name)
|
||||
|
||||
agentID, err := uuid.FromBytes(resp.Agent.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
// And: The database agent's display apps are updated.
|
||||
updatedAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agentID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, updatedAgent.DisplayApps, 2)
|
||||
require.Contains(t, updatedAgent.DisplayApps, database.DisplayAppWebTerminal)
|
||||
require.Contains(t, updatedAgent.DisplayApps, database.DisplayAppSSHHelper)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "OK_OtherFieldsNotModified",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// Given: An existing child agent with specific properties.
|
||||
childAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{Valid: true, UUID: agent.ID},
|
||||
ResourceID: agent.ResourceID,
|
||||
Name: baseChildAgent.Name,
|
||||
Directory: baseChildAgent.Directory,
|
||||
Architecture: baseChildAgent.Architecture,
|
||||
OperatingSystem: baseChildAgent.OperatingSystem,
|
||||
DisplayApps: baseChildAgent.DisplayApps,
|
||||
})
|
||||
|
||||
// When: We call CreateSubAgent with different values for name, directory, arch, and OS.
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: childAgent.ID[:],
|
||||
Name: "different-name",
|
||||
Directory: "/different/path",
|
||||
Architecture: "arm64",
|
||||
OperatingSystem: "darwin",
|
||||
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
|
||||
proto.CreateSubAgentRequest_WEB_TERMINAL,
|
||||
},
|
||||
}
|
||||
},
|
||||
check: func(t *testing.T, ctx context.Context, db database.Store, resp *proto.CreateSubAgentResponse, agent database.WorkspaceAgent) {
|
||||
// Then: The response contains the original agent name, not the new one.
|
||||
require.NotNil(t, resp.Agent)
|
||||
require.Equal(t, baseChildAgent.Name, resp.Agent.Name)
|
||||
|
||||
agentID, err := uuid.FromBytes(resp.Agent.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
// And: The database agent's other fields are unchanged.
|
||||
updatedAgent, err := db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agentID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, baseChildAgent.Name, updatedAgent.Name)
|
||||
require.Equal(t, baseChildAgent.Directory, updatedAgent.Directory)
|
||||
require.Equal(t, baseChildAgent.Architecture, updatedAgent.Architecture)
|
||||
require.Equal(t, baseChildAgent.OperatingSystem, updatedAgent.OperatingSystem)
|
||||
|
||||
// But display apps should be updated.
|
||||
require.Len(t, updatedAgent.DisplayApps, 1)
|
||||
require.Equal(t, database.DisplayAppWebTerminal, updatedAgent.DisplayApps[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Error/MalformedID",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// When: We call CreateSubAgent with malformed ID bytes (not 16 bytes).
|
||||
// uuid.FromBytes requires exactly 16 bytes, so we provide fewer.
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: []byte("short"),
|
||||
}
|
||||
},
|
||||
wantErr: "parse agent id",
|
||||
},
|
||||
{
|
||||
name: "Error/AgentNotFound",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// When: We call CreateSubAgent with a non-existent agent ID.
|
||||
nonExistentID := uuid.New()
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: nonExistentID[:],
|
||||
}
|
||||
},
|
||||
wantErr: "get workspace agent by id",
|
||||
},
|
||||
{
|
||||
name: "Error/ParentMismatch",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// Create a second agent (sibling) within the same workspace/resource.
|
||||
// This sibling has a different parent ID (or no parent).
|
||||
siblingAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{Valid: false}, // No parent - it's a top-level agent
|
||||
ResourceID: agent.ResourceID,
|
||||
Name: "sibling-agent",
|
||||
Directory: "/workspaces/sibling",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
})
|
||||
|
||||
// Create a child of the sibling agent (not our agent).
|
||||
childOfSibling := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{Valid: true, UUID: siblingAgent.ID},
|
||||
ResourceID: agent.ResourceID,
|
||||
Name: "child-of-sibling",
|
||||
Directory: "/workspaces/test",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
})
|
||||
|
||||
// When: Our API (which is for `agent`) tries to update the child of `siblingAgent`.
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: childOfSibling.ID[:],
|
||||
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
|
||||
proto.CreateSubAgentRequest_VSCODE,
|
||||
},
|
||||
}
|
||||
},
|
||||
wantErr: "subagent does not belong to this parent agent",
|
||||
},
|
||||
|
||||
{
|
||||
name: "Error/NoParentID",
|
||||
setup: func(t *testing.T, db database.Store, agent database.WorkspaceAgent) *proto.CreateSubAgentRequest {
|
||||
// Given: An agent without a parent (a top-level agent).
|
||||
topLevelAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
ParentID: uuid.NullUUID{Valid: false}, // No parent
|
||||
ResourceID: agent.ResourceID,
|
||||
Name: "top-level-agent",
|
||||
Directory: "/workspaces/test",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
})
|
||||
|
||||
// When: We try to update this agent as if it were a subagent.
|
||||
return &proto.CreateSubAgentRequest{
|
||||
Id: topLevelAgent.ID[:],
|
||||
DisplayApps: []proto.CreateSubAgentRequest_DisplayApp{
|
||||
proto.CreateSubAgentRequest_VSCODE,
|
||||
},
|
||||
}
|
||||
},
|
||||
wantErr: "subagent does not belong to this parent agent",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
log = testutil.Logger(t)
|
||||
clock = quartz.NewMock(t)
|
||||
|
||||
db, org = newDatabaseWithOrg(t)
|
||||
user, agent = newUserWithWorkspaceAgent(t, db, org)
|
||||
api = newAgentAPI(t, log, db, clock, user, org, agent)
|
||||
)
|
||||
|
||||
req := tc.setup(t, db, agent)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
resp, err := api.CreateSubAgent(ctx, req)
|
||||
|
||||
if tc.wantErr != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
if tc.check != nil {
|
||||
tc.check(t, ctx, db, resp, agent)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("ListSubAgents", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
+21
-4
@@ -977,10 +977,27 @@ func (api *API) authAndDoWithTaskAppClient(
|
||||
ctx := r.Context()
|
||||
|
||||
if task.Status != database.TaskStatusActive {
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Task status must be active.",
|
||||
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
|
||||
})
|
||||
// Return 409 Conflict for valid requests blocked by current state
|
||||
// (pending/initializing are transitional, paused requires resume).
|
||||
// Return 400 Bad Request for error/unknown states.
|
||||
switch task.Status {
|
||||
case database.TaskStatusPending, database.TaskStatusInitializing:
|
||||
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
|
||||
Message: fmt.Sprintf("Task is %s.", task.Status),
|
||||
Detail: "The task is resuming. Wait for the task to become active before sending messages.",
|
||||
})
|
||||
case database.TaskStatusPaused:
|
||||
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
|
||||
Message: "Task is paused.",
|
||||
Detail: "Resume the task to send messages.",
|
||||
})
|
||||
default:
|
||||
// Default handler for error and unknown status.
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Task must be active.",
|
||||
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
|
||||
})
|
||||
}
|
||||
}
|
||||
if !task.WorkspaceID.Valid {
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
|
||||
+298
-65
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/notifications/notificationstest"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -39,6 +40,66 @@ import (
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
// createTaskInState is a helper to create a task in the desired state.
|
||||
// It returns a function that takes context, test, and status, and returns the task ID.
|
||||
// The caller is responsible for setting up the database, owner, and user.
|
||||
func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID, userID uuid.UUID) func(context.Context, *testing.T, database.TaskStatus) uuid.UUID {
|
||||
return func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: ownerOrgID,
|
||||
OwnerID: userID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
OrganizationID: ownerOrgID,
|
||||
OwnerID: userID,
|
||||
}, nil)
|
||||
|
||||
switch status {
|
||||
case database.TaskStatusPending:
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
case database.TaskStatusError:
|
||||
// For error state, create a completed build then manipulate app health.
|
||||
default:
|
||||
require.Fail(t, "unsupported task status in test helper", "status: %s", status)
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
}
|
||||
}
|
||||
|
||||
func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -398,6 +459,144 @@ func TestTasks(t *testing.T) {
|
||||
require.NoError(t, err, "should be possible to delete a task with no workspace")
|
||||
})
|
||||
|
||||
t.Run("SnapshotCleanupOnDeletion", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
task, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me with snapshot",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Create a snapshot for the task.
|
||||
snapshotJSON := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"test"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify snapshot exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete the task.
|
||||
err = client.DeleteTask(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "delete task request should be accepted")
|
||||
|
||||
// Verify snapshot no longer exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "snapshot should be deleted with task")
|
||||
})
|
||||
|
||||
t.Run("DeletionWithoutSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
task, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me without snapshot",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Verify no snapshot exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "snapshot should not exist initially")
|
||||
|
||||
// Delete the task (should succeed even without snapshot).
|
||||
err = client.DeleteTask(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "delete task should succeed even without snapshot")
|
||||
})
|
||||
|
||||
t.Run("PreservesOtherTaskSnapshots", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
// Create task A.
|
||||
taskA, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "task A",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wsA, err := client.Workspace(ctx, taskA.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wsA.LatestBuild.ID)
|
||||
|
||||
// Create task B.
|
||||
taskB, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "task B",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wsB, err := client.Workspace(ctx, taskB.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wsB.LatestBuild.ID)
|
||||
|
||||
// Create snapshots for both tasks.
|
||||
snapshotJSONA := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"task A"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskA.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSONA),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
snapshotJSONB := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"task B"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskB.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSONB),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete task A.
|
||||
err = client.DeleteTask(ctx, "me", taskA.ID)
|
||||
require.NoError(t, err, "delete task A should succeed")
|
||||
|
||||
// Verify task A's snapshot is removed.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), taskA.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "task A snapshot should be deleted")
|
||||
|
||||
// Verify task B's snapshot still exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), taskB.ID)
|
||||
require.NoError(t, err, "task B snapshot should still exist")
|
||||
})
|
||||
|
||||
t.Run("DeletingTaskWorkspaceDeletesTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -591,6 +790,94 @@ func TestTasks(t *testing.T) {
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("SendToNonActiveStates", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
ownerUser, err := client.User(ctx, owner.UserID.String())
|
||||
require.NoError(t, err)
|
||||
ownerSubject := coderdtest.AuthzUserSubject(ownerUser)
|
||||
|
||||
// Create a regular user for task ownership.
|
||||
_, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
createTask := createTaskInState(db, ownerSubject, owner.OrganizationID, user.ID)
|
||||
|
||||
t.Run("Paused", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "paused")
|
||||
require.Contains(t, sdkErr.Detail, "Resume")
|
||||
})
|
||||
|
||||
t.Run("Initializing", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "initializing")
|
||||
require.Contains(t, sdkErr.Detail, "resuming")
|
||||
})
|
||||
|
||||
t.Run("Pending", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "pending")
|
||||
require.Contains(t, sdkErr.Detail, "resuming")
|
||||
})
|
||||
|
||||
t.Run("Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "must be active")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Logs", func(t *testing.T) {
|
||||
@@ -737,61 +1024,7 @@ func TestTasks(t *testing.T) {
|
||||
// Create a regular user to test snapshot access.
|
||||
client, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
// Helper to create a task in the desired state.
|
||||
createTaskInState := func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}, nil)
|
||||
|
||||
switch status {
|
||||
case database.TaskStatusPending:
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
case database.TaskStatusError:
|
||||
// For error state, create a completed build then manipulate app health.
|
||||
default:
|
||||
require.Fail(t, "unsupported task status in test helper", "status: %s", status)
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
}
|
||||
createTask := createTaskInState(db, ownerSubject, owner.OrganizationID, user.ID)
|
||||
|
||||
// Prepare snapshot data used across tests.
|
||||
snapshotMessages := []agentapisdk.Message{
|
||||
@@ -853,7 +1086,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -871,7 +1104,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusInitializing)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -889,7 +1122,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPaused)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -907,7 +1140,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err)
|
||||
@@ -921,7 +1154,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
invalidEnvelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Format: "unknown-format",
|
||||
@@ -950,7 +1183,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -971,7 +1204,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusError)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
_, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.Error(t, err)
|
||||
@@ -997,12 +1230,12 @@ func TestTasks(t *testing.T) {
|
||||
wantErrStatusCode int
|
||||
}{
|
||||
{
|
||||
name: "TaskStatusInitializing",
|
||||
name: "TaskStatusPending",
|
||||
// We want to disable the provisioner so that the task
|
||||
// never gets provisioned (ensuring it stays in Initializing).
|
||||
// never gets picked up (ensuring it stays in Pending).
|
||||
disableProvisioner: true,
|
||||
taskInput: "Valid prompt",
|
||||
wantStatus: codersdk.TaskStatusInitializing,
|
||||
wantStatus: codersdk.TaskStatusPending,
|
||||
wantErr: "Unable to update",
|
||||
wantErrStatusCode: http.StatusConflict,
|
||||
},
|
||||
|
||||
Generated
+938
-11
File diff suppressed because it is too large
Load Diff
Generated
+916
-11
File diff suppressed because it is too large
Load Diff
@@ -95,15 +95,26 @@ func (t *Tracker) FlushToDB(ctx context.Context, db database.Store, replicaID uu
|
||||
t.mu.Unlock()
|
||||
|
||||
//nolint:gocritic // This is the actual package doing boundary usage tracking.
|
||||
_, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
|
||||
UniqueUsersCount: userCount, // cumulative, for UPDATE
|
||||
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
|
||||
UniqueUsersDelta: userDelta, // delta, for INSERT
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
authCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
err := db.InTx(func(tx database.Store) error {
|
||||
// The advisory lock ensures a clean period cutover by preventing
|
||||
// this upsert from racing with the aggregate+delete in
|
||||
// GetAndResetBoundaryUsageSummary. Without it, upserted data
|
||||
// could be lost or miscounted across periods.
|
||||
if err := tx.AcquireLock(authCtx, database.LockIDBoundaryUsageStats); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := tx.UpsertBoundaryUsageStats(authCtx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
|
||||
UniqueUsersCount: userCount, // cumulative, for UPDATE
|
||||
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
|
||||
UniqueUsersDelta: userDelta, // delta, for INSERT
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
return err
|
||||
}, nil)
|
||||
|
||||
// Always reset cumulative counts to prevent unbounded memory growth (e.g.
|
||||
// if the DB is unreachable). Copy delta maps to preserve any Track() calls
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestTracker_Track_Single(t *testing.T) {
|
||||
|
||||
// Verify the data was written correctly.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
@@ -73,7 +73,7 @@ func TestTracker_Track_DuplicateWorkspaceUser(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should be 1 unique workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should be 1 unique user")
|
||||
@@ -102,7 +102,7 @@ func TestTracker_Track_MultipleWorkspacesUsers(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(3), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(2), summary.UniqueUsers)
|
||||
@@ -140,7 +140,7 @@ func TestTracker_Track_Concurrent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueUsers)
|
||||
@@ -175,7 +175,7 @@ func TestTracker_FlushToDB_Accumulates(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
@@ -202,7 +202,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
_, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track new data.
|
||||
@@ -215,7 +215,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// The summary should only contain the new data after reset.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should only count new workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should only count new user")
|
||||
@@ -237,7 +237,7 @@ func TestTracker_FlushToDB_NoActivity(t *testing.T) {
|
||||
|
||||
// Verify nothing was written to DB.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
@@ -265,7 +265,7 @@ func TestUpsertBoundaryUsageStats_Insert(t *testing.T) {
|
||||
require.True(t, newPeriod, "should return true for insert")
|
||||
|
||||
// Verify INSERT used the delta values, not cumulative.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(5), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(3), summary.UniqueUsers)
|
||||
@@ -301,7 +301,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
require.False(t, newPeriod, "should return false for update")
|
||||
|
||||
// Verify UPDATE used cumulative values.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(8), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(5), summary.UniqueUsers)
|
||||
@@ -309,7 +309,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
require.Equal(t, int64(10+20), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
@@ -347,7 +347,7 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation (SUM of all replicas).
|
||||
@@ -357,13 +357,13 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
require.Equal(t, int64(45), summary.DeniedRequests) // 10 + 15 + 20
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// COALESCE should return 0 for all columns.
|
||||
@@ -373,7 +373,7 @@ func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
require.Equal(t, int64(0), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_DeletesData(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
@@ -391,61 +391,19 @@ func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify data exists.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, summary.AllowedRequests, int64(0))
|
||||
|
||||
// Reset.
|
||||
err = db.ResetBoundaryUsageStats(ctx)
|
||||
// Should return the summary AND delete all data.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1+2+3+4+5), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(10+20+30+40+50), summary.AllowedRequests)
|
||||
|
||||
// Verify all data is gone.
|
||||
summary, err = db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
|
||||
// Insert stats for 2 replicas. Delta fields are used for INSERT.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesDelta: 10,
|
||||
UniqueUsersDelta: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesDelta: 20,
|
||||
UniqueUsersDelta: 10,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete replica1's stats.
|
||||
err = db.DeleteBoundaryUsageStatsByReplicaID(ctx, replica1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify only replica2's stats remain.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(20), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(200), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -477,8 +435,8 @@ func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
require.NoError(t, tracker2.FlushToDB(ctx, db, replica2))
|
||||
require.NoError(t, tracker3.FlushToDB(ctx, db, replica3))
|
||||
|
||||
// Telemetry aggregates.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Telemetry aggregates and resets (simulating telemetry report sent).
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation.
|
||||
@@ -487,15 +445,12 @@ func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
require.Equal(t, int64(105), summary.AllowedRequests) // 25 + 75 + 5
|
||||
require.Equal(t, int64(15), summary.DeniedRequests) // 3 + 12 + 0
|
||||
|
||||
// Telemetry resets stats (simulating telemetry report sent).
|
||||
require.NoError(t, db.ResetBoundaryUsageStats(boundaryCtx))
|
||||
|
||||
// Next flush from trackers should detect new period.
|
||||
tracker1.Track(uuid.New(), uuid.New(), 1, 0)
|
||||
require.NoError(t, tracker1.FlushToDB(ctx, db, replica1))
|
||||
|
||||
// Verify trackers reset their in-memory state.
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.AllowedRequests)
|
||||
@@ -513,30 +468,24 @@ func TestTracker_FlushToDB_NoStaleDataAfterReset(t *testing.T) {
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
// Track some data, flush, and verify.
|
||||
// Track some data and flush.
|
||||
tracker.Track(workspaceID, ownerID, 10, 5)
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Simulate telemetry reset (new period) - this also verifies the data.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
require.NoError(t, err)
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
|
||||
// Flush again without any new Track() calls. This should not write stale
|
||||
// data back to the DB.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Summary should be empty (no stale data written).
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.UniqueUsers)
|
||||
@@ -582,7 +531,7 @@ func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
|
||||
|
||||
// Verify stats are non-negative.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.GreaterOrEqual(t, summary.AllowedRequests, int64(0))
|
||||
require.GreaterOrEqual(t, summary.DeniedRequests, int64(0))
|
||||
@@ -597,6 +546,17 @@ type trackDuringUpsertDB struct {
|
||||
userID uuid.UUID
|
||||
}
|
||||
|
||||
func (s *trackDuringUpsertDB) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
|
||||
return s.Store.InTx(func(tx database.Store) error {
|
||||
return fn(&trackDuringUpsertDB{
|
||||
Store: tx,
|
||||
tracker: s.tracker,
|
||||
workspaceID: s.workspaceID,
|
||||
userID: s.userID,
|
||||
})
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func (s *trackDuringUpsertDB) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
s.tracker.Track(s.workspaceID, s.userID, 20, 10)
|
||||
return s.Store.UpsertBoundaryUsageStats(ctx, arg)
|
||||
@@ -626,17 +586,12 @@ func TestTracker_TrackDuringFlush(t *testing.T) {
|
||||
err := tracker.FlushToDB(ctx, trackingDB, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify first flush only wrote the initial data.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// The second flush should include the Track() call that happened during the
|
||||
// first flush's DB operation.
|
||||
// Second flush captures the Track() that happened during the first flush.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Verify both flushes are in the summary.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10+20), summary.AllowedRequests)
|
||||
require.Equal(t, int64(5+10), summary.DeniedRequests)
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -0,0 +1,440 @@
|
||||
// Package cachecompress creates a compressed cache of static files based on an http.FS. It is modified from
|
||||
// https://github.com/go-chi/chi Compressor middleware. See the LICENSE file in this directory for copyright
|
||||
// information.
|
||||
package cachecompress
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
type cacheKey struct {
|
||||
encoding string
|
||||
urlPath string
|
||||
}
|
||||
|
||||
func (c cacheKey) filePath(cacheDir string) string {
|
||||
// URLs can have slashes or other characters we don't want the file system interpreting. So we just encode the path
|
||||
// to a flat base64 filename.
|
||||
filename := base64.URLEncoding.EncodeToString([]byte(c.urlPath))
|
||||
return filepath.Join(cacheDir, c.encoding, filename)
|
||||
}
|
||||
|
||||
func getCacheKey(encoding string, r *http.Request) cacheKey {
|
||||
return cacheKey{
|
||||
encoding: encoding,
|
||||
urlPath: r.URL.Path,
|
||||
}
|
||||
}
|
||||
|
||||
type ref struct {
|
||||
key cacheKey
|
||||
done chan struct{}
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Compressor represents a set of encoding configurations.
|
||||
type Compressor struct {
|
||||
logger slog.Logger
|
||||
// The mapping of encoder names to encoder functions.
|
||||
encoders map[string]EncoderFunc
|
||||
// The mapping of pooled encoders to pools.
|
||||
pooledEncoders map[string]*sync.Pool
|
||||
// The list of encoders in order of decreasing precedence.
|
||||
encodingPrecedence []string
|
||||
level int // The compression level.
|
||||
cacheDir string
|
||||
orig http.FileSystem
|
||||
|
||||
mu sync.Mutex
|
||||
cache map[cacheKey]ref
|
||||
}
|
||||
|
||||
// NewCompressor creates a new Compressor that will handle encoding responses.
|
||||
//
|
||||
// The level should be one of the ones defined in the flate package.
|
||||
// The types are the content types that are allowed to be compressed.
|
||||
func NewCompressor(logger slog.Logger, level int, cacheDir string, orig http.FileSystem) *Compressor {
|
||||
c := &Compressor{
|
||||
logger: logger.Named("cachecompress"),
|
||||
level: level,
|
||||
encoders: make(map[string]EncoderFunc),
|
||||
pooledEncoders: make(map[string]*sync.Pool),
|
||||
cacheDir: cacheDir,
|
||||
orig: orig,
|
||||
cache: make(map[cacheKey]ref),
|
||||
}
|
||||
|
||||
// Set the default encoders. The precedence order uses the reverse
|
||||
// ordering that the encoders were added. This means adding new encoders
|
||||
// will move them to the front of the order.
|
||||
//
|
||||
// TODO:
|
||||
// lzma: Opera.
|
||||
// sdch: Chrome, Android. Gzip output + dictionary header.
|
||||
// br: Brotli, see https://github.com/go-chi/chi/pull/326
|
||||
|
||||
// HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
|
||||
// wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
|
||||
// checksum compared to CRC-32 used in "gzip" and thus is faster.
|
||||
//
|
||||
// But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
|
||||
// raw DEFLATE data only, without the mentioned zlib wrapper.
|
||||
// Because of this major confusion, most modern browsers try it
|
||||
// both ways, first looking for zlib headers.
|
||||
// Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
|
||||
//
|
||||
// The list of browsers having problems is quite big, see:
|
||||
// http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
|
||||
// https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
|
||||
//
|
||||
// That's why we prefer gzip over deflate. It's just more reliable
|
||||
// and not significantly slower than deflate.
|
||||
c.SetEncoder("deflate", encoderDeflate)
|
||||
|
||||
// TODO: Exception for old MSIE browsers that can't handle non-HTML?
|
||||
// https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
|
||||
c.SetEncoder("gzip", encoderGzip)
|
||||
|
||||
// NOTE: Not implemented, intentionally:
|
||||
// case "compress": // LZW. Deprecated.
|
||||
// case "bzip2": // Too slow on-the-fly.
|
||||
// case "zopfli": // Too slow on-the-fly.
|
||||
// case "xz": // Too slow on-the-fly.
|
||||
return c
|
||||
}
|
||||
|
||||
// SetEncoder can be used to set the implementation of a compression algorithm.
|
||||
//
|
||||
// The encoding should be a standardized identifier. See:
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
|
||||
//
|
||||
// For example, add the Brotli algorithm:
|
||||
//
|
||||
// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
|
||||
//
|
||||
// compressor := middleware.NewCompressor(5, "text/html")
|
||||
// compressor.SetEncoder("br", func(w io.Writer, level int) io.Writer {
|
||||
// params := brotli_enc.NewBrotliParams()
|
||||
// params.SetQuality(level)
|
||||
// return brotli_enc.NewBrotliWriter(params, w)
|
||||
// })
|
||||
func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
|
||||
encoding = strings.ToLower(encoding)
|
||||
if encoding == "" {
|
||||
panic("the encoding can not be empty")
|
||||
}
|
||||
if fn == nil {
|
||||
panic("attempted to set a nil encoder function")
|
||||
}
|
||||
|
||||
// If we are adding a new encoder that is already registered, we have to
|
||||
// clear that one out first.
|
||||
delete(c.pooledEncoders, encoding)
|
||||
delete(c.encoders, encoding)
|
||||
|
||||
// If the encoder supports Resetting (IoReseterWriter), then it can be pooled.
|
||||
encoder := fn(io.Discard, c.level)
|
||||
if _, ok := encoder.(ioResetterWriter); ok {
|
||||
pool := &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return fn(io.Discard, c.level)
|
||||
},
|
||||
}
|
||||
c.pooledEncoders[encoding] = pool
|
||||
}
|
||||
// If the encoder is not in the pooledEncoders, add it to the normal encoders.
|
||||
if _, ok := c.pooledEncoders[encoding]; !ok {
|
||||
c.encoders[encoding] = fn
|
||||
}
|
||||
|
||||
for i, v := range c.encodingPrecedence {
|
||||
if v == encoding {
|
||||
c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
|
||||
}
|
||||
|
||||
// ServeHTTP returns the response from the orig file system, compressed if possible.
|
||||
func (c *Compressor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
encoding := c.selectEncoder(r.Header)
|
||||
|
||||
// we can only serve a cached response if all the following:
|
||||
// 1. they requested an encoding we support
|
||||
// 2. they are requesting the whole file, not a range
|
||||
// 3. the method is GET
|
||||
if encoding == "" || r.Header.Get("Range") != "" || r.Method != "GET" {
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Whether we should serve a cached response also depends in a fairly complex way on the path and request
|
||||
// headers. In particular, we don't need a cached response for non-existing files/directories, and should not serve
|
||||
// a cached response if the correct Etag for the file is provided. This logic is all handled by the http.FileServer,
|
||||
// and we don't want to reimplement it here. So, what we'll do is send a HEAD request to the http.FileServer to see
|
||||
// what it would do.
|
||||
headReq := r.Clone(r.Context())
|
||||
headReq.Method = http.MethodHead
|
||||
headRW := &compressResponseWriter{
|
||||
w: io.Discard,
|
||||
headers: make(http.Header),
|
||||
}
|
||||
// deep-copy the headers already set on the response. This includes things like ETags.
|
||||
for key, values := range w.Header() {
|
||||
for _, value := range values {
|
||||
headRW.headers.Add(key, value)
|
||||
}
|
||||
}
|
||||
http.FileServer(c.orig).ServeHTTP(headRW, headReq)
|
||||
if headRW.code != http.StatusOK {
|
||||
// again, fall back to the file server. This is often a 404 Not Found, or a 304 Not Modified if they provided
|
||||
// the correct ETag.
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
cref := c.getRef(encoding, r)
|
||||
c.serveRef(w, r, headRW.headers, cref)
|
||||
}
|
||||
|
||||
func (c *Compressor) serveRef(w http.ResponseWriter, r *http.Request, headers http.Header, cref ref) {
|
||||
select {
|
||||
case <-r.Context().Done():
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
return
|
||||
case <-cref.done:
|
||||
cachePath := cref.key.filePath(c.cacheDir)
|
||||
cacheFile, err := os.Open(cachePath)
|
||||
if err != nil {
|
||||
c.logger.Error(context.Background(), "failed to open compressed cache file",
|
||||
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
|
||||
// fall back to uncompressed
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
}
|
||||
defer cacheFile.Close()
|
||||
|
||||
// we need to remove or modify the Content-Length, if any, set by the FileServer because it will be for
|
||||
// uncompressed data and wrong.
|
||||
info, err := cacheFile.Stat()
|
||||
if err != nil {
|
||||
c.logger.Error(context.Background(), "failed to stat compressed cache file",
|
||||
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
|
||||
headers.Del("Content-Length")
|
||||
} else {
|
||||
headers.Set("Content-Length", fmt.Sprintf("%d", info.Size()))
|
||||
}
|
||||
|
||||
for key, values := range headers {
|
||||
for _, value := range values {
|
||||
w.Header().Add(key, value)
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Encoding", cref.key.encoding)
|
||||
w.Header().Add("Vary", "Accept-Encoding")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err = io.Copy(w, cacheFile)
|
||||
if err != nil {
|
||||
// most commonly, the writer will hang up before we are done.
|
||||
c.logger.Debug(context.Background(), "failed to write compressed cache file", slog.Error(err))
|
||||
}
|
||||
return
|
||||
case <-cref.err:
|
||||
// fall back to uncompressed
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Compressor) getRef(encoding string, r *http.Request) ref {
|
||||
ck := getCacheKey(encoding, r)
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
cref, ok := c.cache[ck]
|
||||
if ok {
|
||||
return cref
|
||||
}
|
||||
// we are the first to encode
|
||||
cref = ref{
|
||||
key: ck,
|
||||
|
||||
done: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
c.cache[ck] = cref
|
||||
go c.compress(context.Background(), encoding, cref, r)
|
||||
return cref
|
||||
}
|
||||
|
||||
func (c *Compressor) compress(ctx context.Context, encoding string, cref ref, r *http.Request) {
|
||||
cachePath := cref.key.filePath(c.cacheDir)
|
||||
var err error
|
||||
// we want to handle closing either cref.done or cref.err in a defer at the bottom of the stack so that the encoder
|
||||
// and cache file are both closed first (higher in the defer stack). This prevents data races where waiting HTTP
|
||||
// handlers start reading the file before all the data has been flushed.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if rErr := os.Remove(cachePath); rErr != nil {
|
||||
// nolint: gocritic // best effort, just debug log any errors
|
||||
c.logger.Debug(ctx, "failed to remove cache file",
|
||||
slog.F("main_err", err), slog.F("remove_err", rErr), slog.F("cache_path", cachePath))
|
||||
}
|
||||
c.mu.Lock()
|
||||
delete(c.cache, cref.key)
|
||||
c.mu.Unlock()
|
||||
close(cref.err)
|
||||
return
|
||||
}
|
||||
close(cref.done)
|
||||
}()
|
||||
|
||||
cacheDir := filepath.Dir(cachePath)
|
||||
err = os.MkdirAll(cacheDir, 0o700)
|
||||
if err != nil {
|
||||
c.logger.Error(ctx, "failed to create cache directory", slog.F("cache_dir", cacheDir))
|
||||
return
|
||||
}
|
||||
|
||||
// We will truncate and overwrite any existing files. This is important in the case that we get restarted
|
||||
// with the same cache dir, possibly with different source files.
|
||||
cacheFile, err := os.OpenFile(cachePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||
if err != nil {
|
||||
c.logger.Error(ctx, "failed to open compression cache file",
|
||||
slog.F("path", cachePath), slog.Error(err))
|
||||
return
|
||||
}
|
||||
defer cacheFile.Close()
|
||||
encoder, cleanup := c.getEncoder(encoding, cacheFile)
|
||||
if encoder == nil {
|
||||
// can only hit this if there is a programming error
|
||||
c.logger.Critical(ctx, "got nil encoder", slog.F("encoding", encoding))
|
||||
err = xerrors.New("nil encoder")
|
||||
return
|
||||
}
|
||||
defer cleanup()
|
||||
defer encoder.Close() // ensures we flush, needs to be called before cleanup(), so we defer after it.
|
||||
|
||||
cw := &compressResponseWriter{
|
||||
w: encoder,
|
||||
headers: make(http.Header), // ignored
|
||||
}
|
||||
http.FileServer(c.orig).ServeHTTP(cw, r)
|
||||
if cw.code != http.StatusOK {
|
||||
// log at debug because this is likely just a 404
|
||||
c.logger.Debug(ctx, "file server failed to serve",
|
||||
slog.F("encoding", encoding), slog.F("url_path", cref.key.urlPath), slog.F("http_code", cw.code))
|
||||
// mark the error so that we clean up correctly
|
||||
err = xerrors.New("file server failed to serve")
|
||||
return
|
||||
}
|
||||
// success!
|
||||
}
|
||||
|
||||
// selectEncoder returns the name of the encoder
|
||||
func (c *Compressor) selectEncoder(h http.Header) string {
|
||||
header := h.Get("Accept-Encoding")
|
||||
|
||||
// Parse the names of all accepted algorithms from the header.
|
||||
accepted := strings.Split(strings.ToLower(header), ",")
|
||||
|
||||
// Find supported encoder by accepted list by precedence
|
||||
for _, name := range c.encodingPrecedence {
|
||||
if matchAcceptEncoding(accepted, name) {
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// No encoder found to match the accepted encoding
|
||||
return ""
|
||||
}
|
||||
|
||||
// getEncoder returns a writer that encodes and writes to the provided writer, and a cleanup func.
|
||||
func (c *Compressor) getEncoder(name string, w io.Writer) (io.WriteCloser, func()) {
|
||||
if pool, ok := c.pooledEncoders[name]; ok {
|
||||
encoder, typeOK := pool.Get().(ioResetterWriter)
|
||||
if !typeOK {
|
||||
return nil, nil
|
||||
}
|
||||
cleanup := func() {
|
||||
pool.Put(encoder)
|
||||
}
|
||||
encoder.Reset(w)
|
||||
return encoder, cleanup
|
||||
}
|
||||
if fn, ok := c.encoders[name]; ok {
|
||||
return fn(w, c.level), func() {}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func matchAcceptEncoding(accepted []string, encoding string) bool {
|
||||
for _, v := range accepted {
|
||||
if strings.Contains(v, encoding) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An EncoderFunc is a function that wraps the provided io.Writer with a
|
||||
// streaming compression algorithm and returns it.
|
||||
//
|
||||
// In case of failure, the function should return nil.
|
||||
type EncoderFunc func(w io.Writer, level int) io.WriteCloser
|
||||
|
||||
// Interface for types that allow resetting io.Writers.
|
||||
type ioResetterWriter interface {
|
||||
io.WriteCloser
|
||||
Reset(w io.Writer)
|
||||
}
|
||||
|
||||
func encoderGzip(w io.Writer, level int) io.WriteCloser {
|
||||
gw, err := gzip.NewWriterLevel(w, level)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return gw
|
||||
}
|
||||
|
||||
func encoderDeflate(w io.Writer, level int) io.WriteCloser {
|
||||
dw, err := flate.NewWriter(w, level)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return dw
|
||||
}
|
||||
|
||||
type compressResponseWriter struct {
|
||||
w io.Writer
|
||||
headers http.Header
|
||||
code int
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) Header() http.Header {
|
||||
return cw.headers
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) WriteHeader(code int) {
|
||||
cw.code = code
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) Write(p []byte) (int, error) {
|
||||
if cw.code == 0 {
|
||||
cw.code = http.StatusOK
|
||||
}
|
||||
return cw.w.Write(p)
|
||||
}
|
||||
@@ -0,0 +1,227 @@
|
||||
package cachecompress
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestCompressorEncodings(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
expectedEncoding string
|
||||
acceptedEncodings []string
|
||||
}{
|
||||
{
|
||||
name: "no expected encodings due to no accepted encodings",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: nil,
|
||||
expectedEncoding: "",
|
||||
},
|
||||
{
|
||||
name: "gzip is only encoding",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"gzip"},
|
||||
expectedEncoding: "gzip",
|
||||
},
|
||||
{
|
||||
name: "gzip is preferred over deflate",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"gzip", "deflate"},
|
||||
expectedEncoding: "gzip",
|
||||
},
|
||||
{
|
||||
name: "deflate is used",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"deflate"},
|
||||
expectedEncoding: "deflate",
|
||||
},
|
||||
{
|
||||
name: "nop is preferred",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"nop, gzip, deflate"},
|
||||
expectedEncoding: "nop",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testutil.Logger(t)
|
||||
tempDir := t.TempDir()
|
||||
cacheDir := filepath.Join(tempDir, "cache")
|
||||
err := os.MkdirAll(cacheDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
srcDir := filepath.Join(tempDir, "src")
|
||||
err = os.MkdirAll(srcDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
|
||||
if len(compressor.encoders) != 0 || len(compressor.pooledEncoders) != 2 {
|
||||
t.Errorf("gzip and deflate should be pooled")
|
||||
}
|
||||
logger.Debug(context.Background(), "started compressor")
|
||||
|
||||
compressor.SetEncoder("nop", func(w io.Writer, _ int) io.WriteCloser {
|
||||
return nopEncoder{w}
|
||||
})
|
||||
|
||||
if len(compressor.encoders) != 1 {
|
||||
t.Errorf("nop encoder should be stored in the encoders map")
|
||||
}
|
||||
|
||||
ts := httptest.NewServer(compressor)
|
||||
defer ts.Close()
|
||||
// ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctx := context.Background()
|
||||
header, respString := testRequestWithAcceptedEncodings(ctx, t, ts, "GET", tc.path, tc.acceptedEncodings...)
|
||||
if respString != "textstring" {
|
||||
t.Errorf("response text doesn't match; expected:%q, got:%q", "textstring", respString)
|
||||
}
|
||||
if got := header.Get("Content-Encoding"); got != tc.expectedEncoding {
|
||||
t.Errorf("expected encoding %q but got %q", tc.expectedEncoding, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testRequestWithAcceptedEncodings(ctx context.Context, t *testing.T, ts *httptest.Server, method, path string, encodings ...string) (http.Header, string) {
|
||||
req, err := http.NewRequestWithContext(ctx, method, ts.URL+path, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return nil, ""
|
||||
}
|
||||
if len(encodings) > 0 {
|
||||
encodingsString := strings.Join(encodings, ",")
|
||||
req.Header.Set("Accept-Encoding", encodingsString)
|
||||
}
|
||||
|
||||
transport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
transport.DisableCompression = true // prevent automatically setting gzip
|
||||
|
||||
resp, err := (&http.Client{Transport: transport}).Do(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
respBody := decodeResponseBody(t, resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
return resp.Header, respBody
|
||||
}
|
||||
|
||||
func decodeResponseBody(t *testing.T, resp *http.Response) string {
|
||||
var reader io.ReadCloser
|
||||
t.Logf("encoding: '%s'", resp.Header.Get("Content-Encoding"))
|
||||
rawBody, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
t.Logf("raw body: %x", rawBody)
|
||||
switch resp.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
var err error
|
||||
reader, err = gzip.NewReader(bytes.NewReader(rawBody))
|
||||
require.NoError(t, err)
|
||||
case "deflate":
|
||||
reader = flate.NewReader(bytes.NewReader(rawBody))
|
||||
default:
|
||||
return string(rawBody)
|
||||
}
|
||||
respBody, err := io.ReadAll(reader)
|
||||
require.NoError(t, err, "failed to read response body: %T %+v", err, err)
|
||||
err = reader.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
return string(respBody)
|
||||
}
|
||||
|
||||
type nopEncoder struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (nopEncoder) Close() error { return nil }
|
||||
|
||||
// nolint: tparallel // we want to assert the state of the cache, so run synchronously
|
||||
func TestCompressorHeadings(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testutil.Logger(t)
|
||||
tempDir := t.TempDir()
|
||||
cacheDir := filepath.Join(tempDir, "cache")
|
||||
err := os.MkdirAll(cacheDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
srcDir := filepath.Join(tempDir, "src")
|
||||
err = os.MkdirAll(srcDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
|
||||
|
||||
ts := httptest.NewServer(compressor)
|
||||
defer ts.Close()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "exists",
|
||||
path: "/file.html",
|
||||
},
|
||||
{
|
||||
name: "not found",
|
||||
path: "/missing.html",
|
||||
},
|
||||
{
|
||||
name: "not found directory",
|
||||
path: "/a_directory/",
|
||||
},
|
||||
}
|
||||
|
||||
// nolint: paralleltest // we want to assert the state of the cache, so run synchronously
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
req := httptest.NewRequestWithContext(ctx, "GET", tc.path, nil)
|
||||
|
||||
// request directly from http.FileServer as our baseline response
|
||||
respROrig := httptest.NewRecorder()
|
||||
http.FileServer(http.Dir(srcDir)).ServeHTTP(respROrig, req)
|
||||
respOrig := respROrig.Result()
|
||||
|
||||
req.Header.Add("Accept-Encoding", "gzip")
|
||||
// serve twice so that we go thru cache hit and cache miss code
|
||||
for range 2 {
|
||||
respRec := httptest.NewRecorder()
|
||||
compressor.ServeHTTP(respRec, req)
|
||||
respComp := respRec.Result()
|
||||
|
||||
require.Equal(t, respOrig.StatusCode, respComp.StatusCode)
|
||||
for key, values := range respOrig.Header {
|
||||
if key == "Content-Length" {
|
||||
continue // we don't get length on compressed responses
|
||||
}
|
||||
require.Equal(t, values, respComp.Header[key])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
// only the cache hit should leave a file around
|
||||
files, err := os.ReadDir(srcDir)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, files, 1)
|
||||
}
|
||||
+75
-30
@@ -21,11 +21,9 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/andybalholm/brotli"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/google/uuid"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
@@ -44,6 +42,7 @@ import (
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
_ "github.com/coder/coder/v2/coderd/apidoc" // Used for swagger docs.
|
||||
"github.com/coder/coder/v2/coderd/appearance"
|
||||
@@ -91,6 +90,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
|
||||
"github.com/coder/coder/v2/coderd/workspacestats"
|
||||
"github.com/coder/coder/v2/coderd/wsbuilder"
|
||||
"github.com/coder/coder/v2/coderd/wspubsub"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/drpcsdk"
|
||||
"github.com/coder/coder/v2/codersdk/healthsdk"
|
||||
@@ -99,6 +99,8 @@ import (
|
||||
"github.com/coder/coder/v2/provisionersdk"
|
||||
"github.com/coder/coder/v2/site"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/eventsink"
|
||||
tailnetproto "github.com/coder/coder/v2/tailnet/proto"
|
||||
"github.com/coder/quartz"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
@@ -415,7 +417,8 @@ func New(options *Options) *API {
|
||||
options.NetworkTelemetryBatchMaxSize = 1_000
|
||||
}
|
||||
if options.TailnetCoordinator == nil {
|
||||
options.TailnetCoordinator = tailnet.NewCoordinator(options.Logger)
|
||||
eventSink := eventsink.NewEventSink(context.Background(), options.Database, options.Logger)
|
||||
options.TailnetCoordinator = tailnet.NewCoordinator(options.Logger, eventSink)
|
||||
}
|
||||
if options.Auditor == nil {
|
||||
options.Auditor = audit.NewNop()
|
||||
@@ -462,10 +465,6 @@ func New(options *Options) *API {
|
||||
if siteCacheDir != "" {
|
||||
siteCacheDir = filepath.Join(siteCacheDir, "site")
|
||||
}
|
||||
binFS, binHashes, err := site.ExtractOrReadBinFS(siteCacheDir, site.FS())
|
||||
if err != nil {
|
||||
panic(xerrors.Errorf("read site bin failed: %w", err))
|
||||
}
|
||||
|
||||
metricsCache := metricscache.New(
|
||||
options.Database,
|
||||
@@ -658,9 +657,8 @@ func New(options *Options) *API {
|
||||
WebPushPublicKey: api.WebpushDispatcher.PublicKey(),
|
||||
Telemetry: api.Telemetry.Enabled(),
|
||||
}
|
||||
api.SiteHandler = site.New(&site.Options{
|
||||
BinFS: binFS,
|
||||
BinHashes: binHashes,
|
||||
api.SiteHandler, err = site.New(&site.Options{
|
||||
CacheDir: siteCacheDir,
|
||||
Database: options.Database,
|
||||
SiteFS: site.FS(),
|
||||
OAuth2Configs: oauthConfigs,
|
||||
@@ -672,6 +670,9 @@ func New(options *Options) *API {
|
||||
Logger: options.Logger.Named("site"),
|
||||
HideAITasks: options.DeploymentValues.HideAITasks.Value(),
|
||||
})
|
||||
if err != nil {
|
||||
options.Logger.Fatal(ctx, "failed to initialize site handler", slog.Error(err))
|
||||
}
|
||||
api.SiteHandler.Experiments.Store(&experiments)
|
||||
|
||||
if options.UpdateCheckOptions != nil {
|
||||
@@ -737,20 +738,23 @@ func New(options *Options) *API {
|
||||
api.Auditor.Store(&options.Auditor)
|
||||
api.ConnectionLogger.Store(&options.ConnectionLogger)
|
||||
api.TailnetCoordinator.Store(&options.TailnetCoordinator)
|
||||
serverTailnetID := uuid.New()
|
||||
dialer := &InmemTailnetDialer{
|
||||
CoordPtr: &api.TailnetCoordinator,
|
||||
DERPFn: api.DERPMap,
|
||||
Logger: options.Logger,
|
||||
ClientID: uuid.New(),
|
||||
ClientID: serverTailnetID,
|
||||
DatabaseHealthCheck: api.Database,
|
||||
}
|
||||
stn, err := NewServerTailnet(api.ctx,
|
||||
options.Logger,
|
||||
options.DERPServer,
|
||||
serverTailnetID,
|
||||
dialer,
|
||||
options.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
|
||||
options.DeploymentValues.DERP.Config.BlockDirect.Value(),
|
||||
api.TracerProvider,
|
||||
"Coder Server",
|
||||
)
|
||||
if err != nil {
|
||||
panic("failed to setup server tailnet: " + err.Error())
|
||||
@@ -758,6 +762,7 @@ func New(options *Options) *API {
|
||||
api.agentProvider = stn
|
||||
if options.DeploymentValues.Prometheus.Enable {
|
||||
options.PrometheusRegistry.MustRegister(stn)
|
||||
api.lifecycleMetrics = agentapi.NewLifecycleMetrics(options.PrometheusRegistry)
|
||||
}
|
||||
api.NetworkTelemetryBatcher = tailnet.NewNetworkTelemetryBatcher(
|
||||
quartz.NewReal(),
|
||||
@@ -765,17 +770,19 @@ func New(options *Options) *API {
|
||||
api.Options.NetworkTelemetryBatchMaxSize,
|
||||
api.handleNetworkTelemetry,
|
||||
)
|
||||
api.PeerNetworkTelemetryStore = NewPeerNetworkTelemetryStore()
|
||||
if options.CoordinatorResumeTokenProvider == nil {
|
||||
panic("CoordinatorResumeTokenProvider is nil")
|
||||
}
|
||||
api.TailnetClientService, err = tailnet.NewClientService(tailnet.ClientServiceOptions{
|
||||
Logger: api.Logger.Named("tailnetclient"),
|
||||
CoordPtr: &api.TailnetCoordinator,
|
||||
DERPMapUpdateFrequency: api.Options.DERPMapUpdateFrequency,
|
||||
DERPMapFn: api.DERPMap,
|
||||
NetworkTelemetryHandler: api.NetworkTelemetryBatcher.Handler,
|
||||
ResumeTokenProvider: api.Options.CoordinatorResumeTokenProvider,
|
||||
WorkspaceUpdatesProvider: api.UpdatesProvider,
|
||||
Logger: api.Logger.Named("tailnetclient"),
|
||||
CoordPtr: &api.TailnetCoordinator,
|
||||
DERPMapUpdateFrequency: api.Options.DERPMapUpdateFrequency,
|
||||
DERPMapFn: api.DERPMap,
|
||||
NetworkTelemetryHandler: api.NetworkTelemetryBatcher.Handler,
|
||||
IdentifiedTelemetryHandler: api.handleIdentifiedTelemetry,
|
||||
ResumeTokenProvider: api.Options.CoordinatorResumeTokenProvider,
|
||||
WorkspaceUpdatesProvider: api.UpdatesProvider,
|
||||
})
|
||||
if err != nil {
|
||||
api.Logger.Fatal(context.Background(), "failed to initialize tailnet client service", slog.Error(err))
|
||||
@@ -1519,6 +1526,7 @@ func New(options *Options) *API {
|
||||
r.Delete("/", api.deleteWorkspaceAgentPortShare)
|
||||
})
|
||||
r.Get("/timings", api.workspaceTimings)
|
||||
r.Get("/sessions", api.workspaceSessions)
|
||||
r.Route("/acl", func(r chi.Router) {
|
||||
r.Use(
|
||||
httpmw.RequireExperiment(api.Experiments, codersdk.ExperimentWorkspaceSharing),
|
||||
@@ -1830,6 +1838,7 @@ type API struct {
|
||||
WorkspaceClientCoordinateOverride atomic.Pointer[func(rw http.ResponseWriter) bool]
|
||||
TailnetCoordinator atomic.Pointer[tailnet.Coordinator]
|
||||
NetworkTelemetryBatcher *tailnet.NetworkTelemetryBatcher
|
||||
PeerNetworkTelemetryStore *PeerNetworkTelemetryStore
|
||||
TailnetClientService *tailnet.ClientService
|
||||
// WebpushDispatcher is a way to send notifications to users via Web Push.
|
||||
WebpushDispatcher webpush.Dispatcher
|
||||
@@ -1892,8 +1901,9 @@ type API struct {
|
||||
healthCheckCache atomic.Pointer[healthsdk.HealthcheckReport]
|
||||
healthCheckProgress healthcheck.Progress
|
||||
|
||||
statsReporter *workspacestats.Reporter
|
||||
metadataBatcher *metadatabatcher.Batcher
|
||||
statsReporter *workspacestats.Reporter
|
||||
metadataBatcher *metadatabatcher.Batcher
|
||||
lifecycleMetrics *agentapi.LifecycleMetrics
|
||||
|
||||
Acquirer *provisionerdserver.Acquirer
|
||||
// dbRolluper rolls up template usage stats from raw agent and app
|
||||
@@ -1963,6 +1973,36 @@ func (api *API) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleIdentifiedTelemetry stores peer telemetry events and publishes a
|
||||
// workspace update so watch subscribers see fresh data.
|
||||
func (api *API) handleIdentifiedTelemetry(agentID, peerID uuid.UUID, events []*tailnetproto.TelemetryEvent) {
|
||||
if len(events) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, event := range events {
|
||||
api.PeerNetworkTelemetryStore.Update(agentID, peerID, event)
|
||||
}
|
||||
|
||||
// Telemetry callback runs outside any user request, so we use a system
|
||||
// context to look up the workspace for the pubsub notification.
|
||||
ctx := dbauthz.AsSystemRestricted(context.Background()) //nolint:gocritic // Telemetry callback has no user context.
|
||||
workspace, err := api.Database.GetWorkspaceByAgentID(ctx, agentID)
|
||||
if err != nil {
|
||||
api.Logger.Warn(ctx, "failed to resolve workspace for telemetry update",
|
||||
slog.F("agent_id", agentID),
|
||||
slog.Error(err),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
api.publishWorkspaceUpdate(ctx, workspace.OwnerID, wspubsub.WorkspaceEvent{
|
||||
Kind: wspubsub.WorkspaceEventKindConnectionLogUpdate,
|
||||
WorkspaceID: workspace.ID,
|
||||
AgentID: &agentID,
|
||||
})
|
||||
}
|
||||
|
||||
func compressHandler(h http.Handler) http.Handler {
|
||||
level := 5
|
||||
if flag.Lookup("test.v") != nil {
|
||||
@@ -1974,16 +2014,13 @@ func compressHandler(h http.Handler) http.Handler {
|
||||
"application/*",
|
||||
"image/*",
|
||||
)
|
||||
cmp.SetEncoder("br", func(w io.Writer, level int) io.Writer {
|
||||
return brotli.NewWriterLevel(w, level)
|
||||
})
|
||||
cmp.SetEncoder("zstd", func(w io.Writer, level int) io.Writer {
|
||||
zw, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
|
||||
if err != nil {
|
||||
panic("invalid zstd compressor: " + err.Error())
|
||||
}
|
||||
return zw
|
||||
})
|
||||
for encoding := range site.StandardEncoders {
|
||||
writeCloserFn := site.StandardEncoders[encoding]
|
||||
cmp.SetEncoder(encoding, func(w io.Writer, level int) io.Writer {
|
||||
writeCloser := writeCloserFn(w, level)
|
||||
return writeCloser
|
||||
})
|
||||
}
|
||||
|
||||
return cmp.Handler(h)
|
||||
}
|
||||
@@ -1996,8 +2033,15 @@ func MemoryProvisionerWithVersionOverride(version string) MemoryProvisionerDaemo
|
||||
}
|
||||
}
|
||||
|
||||
func MemoryProvisionerWithHeartbeatOverride(heartbeatFN func(context.Context) error) MemoryProvisionerDaemonOption {
|
||||
return func(opts *memoryProvisionerDaemonOptions) {
|
||||
opts.heartbeatFn = heartbeatFN
|
||||
}
|
||||
}
|
||||
|
||||
type memoryProvisionerDaemonOptions struct {
|
||||
versionOverride string
|
||||
heartbeatFn func(context.Context) error
|
||||
}
|
||||
|
||||
// CreateInMemoryProvisionerDaemon is an in-memory connection to a provisionerd.
|
||||
@@ -2087,6 +2131,7 @@ func (api *API) CreateInMemoryTaggedProvisionerDaemon(dialCtx context.Context, n
|
||||
OIDCConfig: api.OIDCConfig,
|
||||
ExternalAuthConfigs: api.ExternalAuthConfigs,
|
||||
Clock: api.Clock,
|
||||
HeartbeatFn: options.heartbeatFn,
|
||||
},
|
||||
api.NotificationsEnqueuer,
|
||||
&api.PrebuildsReconciler,
|
||||
|
||||
@@ -82,6 +82,10 @@ func (m *FakeConnectionLogger) Contains(t testing.TB, expected database.UpsertCo
|
||||
t.Logf("connection log %d: expected AgentName %s, got %s", idx+1, expected.AgentName, cl.AgentName)
|
||||
continue
|
||||
}
|
||||
if expected.AgentID.Valid && cl.AgentID.UUID != expected.AgentID.UUID {
|
||||
t.Logf("connection log %d: expected AgentID %s, got %s", idx+1, expected.AgentID.UUID, cl.AgentID.UUID)
|
||||
continue
|
||||
}
|
||||
if expected.Type != "" && cl.Type != expected.Type {
|
||||
t.Logf("connection log %d: expected Type %s, got %s", idx+1, expected.Type, cl.Type)
|
||||
continue
|
||||
|
||||
@@ -0,0 +1,938 @@
|
||||
package database_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/sqlc-dev/pqtype"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
)
|
||||
|
||||
func TestCloseOpenAgentConnectionLogsForWorkspace(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
|
||||
ws1 := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
ID: uuid.New(),
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
ws2 := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
ID: uuid.New(),
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(127, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
|
||||
// Simulate agent clock skew by using a connect time in the future.
|
||||
connectTime := dbtime.Now().Add(time.Hour)
|
||||
|
||||
sshLog1, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: connectTime,
|
||||
OrganizationID: ws1.OrganizationID,
|
||||
WorkspaceOwnerID: ws1.OwnerID,
|
||||
WorkspaceID: ws1.ID,
|
||||
WorkspaceName: ws1.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
appLog, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: dbtime.Now(),
|
||||
OrganizationID: ws1.OrganizationID,
|
||||
WorkspaceOwnerID: ws1.OwnerID,
|
||||
WorkspaceID: ws1.ID,
|
||||
WorkspaceName: ws1.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeWorkspaceApp,
|
||||
Ip: ip,
|
||||
UserAgent: sql.NullString{String: "test", Valid: true},
|
||||
UserID: uuid.NullUUID{UUID: ws1.OwnerID, Valid: true},
|
||||
SlugOrPort: sql.NullString{String: "app", Valid: true},
|
||||
Code: sql.NullInt32{Int32: 200, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
sshLog2, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: dbtime.Now(),
|
||||
OrganizationID: ws2.OrganizationID,
|
||||
WorkspaceOwnerID: ws2.OwnerID,
|
||||
WorkspaceID: ws2.ID,
|
||||
WorkspaceName: ws2.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rowsClosed, err := db.CloseOpenAgentConnectionLogsForWorkspace(ctx, database.CloseOpenAgentConnectionLogsForWorkspaceParams{
|
||||
WorkspaceID: ws1.ID,
|
||||
ClosedAt: dbtime.Now(),
|
||||
Reason: "workspace stopped",
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
database.ConnectionTypeVscode,
|
||||
database.ConnectionTypeJetbrains,
|
||||
database.ConnectionTypeReconnectingPty,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, rowsClosed)
|
||||
|
||||
ws1Rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{WorkspaceID: ws1.ID})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, ws1Rows, 2)
|
||||
|
||||
for _, row := range ws1Rows {
|
||||
switch row.ConnectionLog.ID {
|
||||
case sshLog1.ID:
|
||||
updated := row.ConnectionLog
|
||||
require.True(t, updated.DisconnectTime.Valid)
|
||||
require.True(t, updated.DisconnectReason.Valid)
|
||||
require.Equal(t, "workspace stopped", updated.DisconnectReason.String)
|
||||
require.False(t, updated.DisconnectTime.Time.Before(updated.ConnectTime), "disconnect_time should never be before connect_time")
|
||||
case appLog.ID:
|
||||
notClosed := row.ConnectionLog
|
||||
require.False(t, notClosed.DisconnectTime.Valid)
|
||||
require.False(t, notClosed.DisconnectReason.Valid)
|
||||
default:
|
||||
t.Fatalf("unexpected connection log id: %s", row.ConnectionLog.ID)
|
||||
}
|
||||
}
|
||||
|
||||
ws2Rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{WorkspaceID: ws2.ID})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, ws2Rows, 1)
|
||||
require.Equal(t, sshLog2.ID, ws2Rows[0].ConnectionLog.ID)
|
||||
require.False(t, ws2Rows[0].ConnectionLog.DisconnectTime.Valid)
|
||||
}
|
||||
|
||||
// Regression test: CloseConnectionLogsAndCreateSessions must not fail
|
||||
// when connection_logs have NULL IPs (e.g., disconnect-only tunnel
|
||||
// events). NULL-IP logs should be closed but no session created for
|
||||
// them.
|
||||
func TestCloseConnectionLogsAndCreateSessions_NullIP(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
validIP := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(10, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// Connection with a valid IP.
|
||||
sshLog, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-30 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: validIP,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Connection with a NULL IP — simulates a disconnect-only tunnel
|
||||
// event where the source node info is unavailable.
|
||||
nullIPLog, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-25 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: pqtype.Inet{Valid: false},
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// This previously failed with: "pq: null value in column ip of
|
||||
// relation workspace_sessions violates not-null constraint".
|
||||
closedAt := now.Add(-5 * time.Minute)
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify both logs were closed.
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 2)
|
||||
|
||||
for _, row := range rows {
|
||||
cl := row.ConnectionLog
|
||||
require.True(t, cl.DisconnectTime.Valid,
|
||||
"connection log %s (type=%s) should be closed", cl.ID, cl.Type)
|
||||
|
||||
switch cl.ID {
|
||||
case sshLog.ID:
|
||||
// Valid-IP log should have a session.
|
||||
require.True(t, cl.SessionID.Valid,
|
||||
"valid-IP log should be linked to a session")
|
||||
case nullIPLog.ID:
|
||||
// NULL-IP system connection overlaps with the SSH
|
||||
// session, so it gets attached to that session.
|
||||
require.True(t, cl.SessionID.Valid,
|
||||
"NULL-IP system log overlapping with SSH session should be linked to a session")
|
||||
default:
|
||||
t.Fatalf("unexpected connection log id: %s", cl.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test: CloseConnectionLogsAndCreateSessions must handle
|
||||
// connections that are already disconnected but have no session_id
|
||||
// (e.g., system/tunnel connections disconnected by dbsink). It must
|
||||
// also avoid creating duplicate sessions when assignSessionForDisconnect
|
||||
// has already created one for the same IP/time range.
|
||||
func TestCloseConnectionLogsAndCreateSessions_AlreadyDisconnectedGetsSession(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(127, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// A system connection that was already disconnected (by dbsink)
|
||||
// but has no session_id — dbsink doesn't assign sessions.
|
||||
sysConnID := uuid.New()
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sysConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-5 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sysConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run CloseConnectionLogsAndCreateSessions (workspace stop).
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// The system connection should now have a session_id.
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 1)
|
||||
require.True(t, rows[0].ConnectionLog.SessionID.Valid,
|
||||
"already-disconnected system connection should be assigned to a session")
|
||||
}
|
||||
|
||||
// Regression test: when assignSessionForDisconnect has already
|
||||
// created a session for an SSH connection,
|
||||
// CloseConnectionLogsAndCreateSessions must reuse that session
|
||||
// instead of creating a duplicate.
|
||||
func TestCloseConnectionLogsAndCreateSessions_ReusesExistingSession(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(127, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// Simulate an SSH connection where assignSessionForDisconnect
|
||||
// already created a session but the connection log's session_id
|
||||
// was set (the normal successful path).
|
||||
sshConnID := uuid.New()
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sshConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
sshLog, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-5 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sshConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the session that assignSessionForDisconnect would have
|
||||
// created, and link the connection log to it.
|
||||
existingSessionIDRaw, err := db.FindOrCreateSessionForDisconnect(ctx, database.FindOrCreateSessionForDisconnectParams{
|
||||
WorkspaceID: ws.ID.String(),
|
||||
Ip: ip,
|
||||
ConnectTime: sshLog.ConnectTime,
|
||||
DisconnectTime: sshLog.DisconnectTime.Time,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
existingSessionID, err := uuid.Parse(fmt.Sprintf("%s", existingSessionIDRaw))
|
||||
require.NoError(t, err)
|
||||
err = db.UpdateConnectionLogSessionID(ctx, database.UpdateConnectionLogSessionIDParams{
|
||||
ID: sshLog.ID,
|
||||
SessionID: uuid.NullUUID{UUID: existingSessionID, Valid: true},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Also add a system connection (no session, already disconnected).
|
||||
sysConnID := uuid.New()
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sysConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-5 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: sysConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run CloseConnectionLogsAndCreateSessions.
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify: the system connection should be assigned to the
|
||||
// EXISTING session (reused), not a new one.
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 2)
|
||||
|
||||
for _, row := range rows {
|
||||
cl := row.ConnectionLog
|
||||
require.True(t, cl.SessionID.Valid,
|
||||
"connection log %s (type=%s) should have a session", cl.ID, cl.Type)
|
||||
require.Equal(t, existingSessionID, cl.SessionID.UUID,
|
||||
"connection log %s should reuse the existing session, not create a new one", cl.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// Test: connections with different IPs but same hostname get grouped
|
||||
// into one session.
|
||||
func TestCloseConnectionLogsAndCreateSessions_GroupsByHostname(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
now := dbtime.Now()
|
||||
hostname := sql.NullString{String: "my-laptop", Valid: true}
|
||||
|
||||
// Create 3 SSH connections with different IPs but same hostname,
|
||||
// overlapping in time.
|
||||
for i := 0; i < 3; i++ {
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(10, 0, 0, byte(i+1)),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(time.Duration(-30+i*5) * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ClientHostname: hostname,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
closedAt := now
|
||||
_, err := db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 3)
|
||||
|
||||
// All 3 connections should have the same session_id.
|
||||
var sessionID uuid.UUID
|
||||
for i, row := range rows {
|
||||
cl := row.ConnectionLog
|
||||
require.True(t, cl.SessionID.Valid,
|
||||
"connection %d should have a session", i)
|
||||
if i == 0 {
|
||||
sessionID = cl.SessionID.UUID
|
||||
} else {
|
||||
require.Equal(t, sessionID, cl.SessionID.UUID,
|
||||
"all connections with same hostname should share one session")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: a long-running system connection gets attached to the first
|
||||
// overlapping primary session, not the second.
|
||||
func TestCloseConnectionLogsAndCreateSessions_SystemAttachesToFirstSession(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(10, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// System connection spanning the full workspace lifetime.
|
||||
sysLog, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-3 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// SSH session 1: -3h to -2h.
|
||||
ssh1ConnID := uuid.New()
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-3 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: ssh1ConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
ssh1Disc, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-2 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: ssh1ConnID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_ = ssh1Disc
|
||||
|
||||
// SSH session 2: -30min to now (>30min gap from session 1).
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-30 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Find the system connection and its assigned session.
|
||||
var sysSessionID uuid.UUID
|
||||
// Collect all session IDs from SSH connections to verify 2
|
||||
// distinct sessions were created.
|
||||
sshSessionIDs := make(map[uuid.UUID]bool)
|
||||
for _, row := range rows {
|
||||
cl := row.ConnectionLog
|
||||
if cl.ID == sysLog.ID {
|
||||
require.True(t, cl.SessionID.Valid,
|
||||
"system connection should have a session")
|
||||
sysSessionID = cl.SessionID.UUID
|
||||
}
|
||||
if cl.Type == database.ConnectionTypeSsh && cl.SessionID.Valid {
|
||||
sshSessionIDs[cl.SessionID.UUID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Two distinct SSH sessions should exist (>30min gap).
|
||||
require.Len(t, sshSessionIDs, 2, "should have 2 distinct SSH sessions")
|
||||
|
||||
// System connection should be attached to the first (earliest)
|
||||
// session.
|
||||
require.True(t, sshSessionIDs[sysSessionID],
|
||||
"system connection should be attached to one of the SSH sessions")
|
||||
}
|
||||
|
||||
// Test: an orphaned system connection (no overlapping primary sessions)
|
||||
// with an IP gets its own session.
|
||||
func TestCloseConnectionLogsAndCreateSessions_OrphanSystemGetsOwnSession(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(10, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// System connection with an IP but no overlapping primary
|
||||
// connections.
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 1)
|
||||
require.True(t, rows[0].ConnectionLog.SessionID.Valid,
|
||||
"orphaned system connection with IP should get its own session")
|
||||
}
|
||||
|
||||
// Test: a system connection with NULL IP and no overlapping primary
|
||||
// sessions gets no session (can't create a useful session without IP).
|
||||
func TestCloseConnectionLogsAndCreateSessions_SystemNoIPNoSession(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
now := dbtime.Now()
|
||||
|
||||
// System connection with NULL IP and no overlapping primary.
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSystem,
|
||||
Ip: pqtype.Inet{Valid: false},
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSystem,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rows, 1)
|
||||
require.True(t, rows[0].ConnectionLog.DisconnectTime.Valid,
|
||||
"system connection should be closed")
|
||||
require.False(t, rows[0].ConnectionLog.SessionID.Valid,
|
||||
"NULL-IP system connection with no primary overlap should not get a session")
|
||||
}
|
||||
|
||||
// Test: connections from the same hostname with a >30-minute gap
|
||||
// create separate sessions.
|
||||
func TestCloseConnectionLogsAndCreateSessions_SeparateSessionsForLargeGap(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
u := dbgen.User(t, db, database.User{})
|
||||
o := dbgen.Organization(t, db, database.Organization{})
|
||||
tpl := dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: o.ID,
|
||||
CreatedBy: u.ID,
|
||||
})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: o.ID,
|
||||
AutomaticUpdates: database.AutomaticUpdatesNever,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
|
||||
ip := pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(10, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
now := dbtime.Now()
|
||||
|
||||
// SSH connection 1: -3h to -2h.
|
||||
conn1ID := uuid.New()
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-3 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: conn1ID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-2 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: conn1ID, Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// SSH connection 2: -30min to now (>30min gap from connection 1).
|
||||
_, err = db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: now.Add(-30 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: "agent",
|
||||
Type: database.ConnectionTypeSsh,
|
||||
Ip: ip,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
closedAt := now
|
||||
_, err = db.CloseConnectionLogsAndCreateSessions(ctx, database.CloseConnectionLogsAndCreateSessionsParams{
|
||||
ClosedAt: sql.NullTime{Time: closedAt, Valid: true},
|
||||
Reason: sql.NullString{String: "workspace stopped", Valid: true},
|
||||
WorkspaceID: ws.ID,
|
||||
Types: []database.ConnectionType{
|
||||
database.ConnectionTypeSsh,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
|
||||
WorkspaceID: ws.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
sessionIDs := make(map[uuid.UUID]bool)
|
||||
for _, row := range rows {
|
||||
cl := row.ConnectionLog
|
||||
if cl.SessionID.Valid {
|
||||
sessionIDs[cl.SessionID.UUID] = true
|
||||
}
|
||||
}
|
||||
require.Len(t, sessionIDs, 2,
|
||||
"connections with >30min gap should create 2 separate sessions")
|
||||
}
|
||||
@@ -0,0 +1,239 @@
|
||||
package database_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
)
|
||||
|
||||
func TestGetOngoingAgentConnectionsLast24h(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
|
||||
org := dbfake.Organization(t, db).Do()
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
tpl := dbgen.Template(t, db, database.Template{OrganizationID: org.Org.ID, CreatedBy: user.ID})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateID: tpl.ID,
|
||||
Name: "ws",
|
||||
})
|
||||
|
||||
now := dbtime.Now()
|
||||
since := now.Add(-24 * time.Hour)
|
||||
|
||||
const (
|
||||
agent1 = "agent1"
|
||||
agent2 = "agent2"
|
||||
)
|
||||
|
||||
// Insert a disconnected log that should be excluded.
|
||||
disconnectedConnID := uuid.New()
|
||||
disconnected := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-30 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agent1,
|
||||
Type: database.ConnectionTypeSsh,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: disconnectedConnID, Valid: true},
|
||||
})
|
||||
_ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-20 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
AgentName: disconnected.AgentName,
|
||||
ConnectionStatus: database.ConnectionStatusDisconnected,
|
||||
ConnectionID: disconnected.ConnectionID,
|
||||
DisconnectReason: sql.NullString{String: "closed", Valid: true},
|
||||
})
|
||||
|
||||
// Insert an old log that should be excluded by the 24h window.
|
||||
_ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-25 * time.Hour),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agent1,
|
||||
Type: database.ConnectionTypeSsh,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
})
|
||||
|
||||
// Insert a web log that should be excluded by the types filter.
|
||||
_ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agent1,
|
||||
Type: database.ConnectionTypeWorkspaceApp,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
})
|
||||
|
||||
// Insert 55 active logs for agent1 (should be capped to 50).
|
||||
for i := 0; i < 55; i++ {
|
||||
_ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-time.Duration(i) * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agent1,
|
||||
Type: database.ConnectionTypeVscode,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
})
|
||||
}
|
||||
|
||||
// Insert one active log for agent2.
|
||||
agent2Log := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-5 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agent2,
|
||||
Type: database.ConnectionTypeJetbrains,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
})
|
||||
|
||||
logs, err := db.GetOngoingAgentConnectionsLast24h(ctx, database.GetOngoingAgentConnectionsLast24hParams{
|
||||
WorkspaceIds: []uuid.UUID{ws.ID},
|
||||
AgentNames: []string{agent1, agent2},
|
||||
Types: []database.ConnectionType{database.ConnectionTypeSsh, database.ConnectionTypeVscode, database.ConnectionTypeJetbrains, database.ConnectionTypeReconnectingPty},
|
||||
Since: since,
|
||||
PerAgentLimit: 50,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
byAgent := map[string][]database.GetOngoingAgentConnectionsLast24hRow{}
|
||||
for _, l := range logs {
|
||||
byAgent[l.AgentName] = append(byAgent[l.AgentName], l)
|
||||
}
|
||||
|
||||
// Agent1 should be capped at 50 and contain only active logs within the window.
|
||||
require.Len(t, byAgent[agent1], 50)
|
||||
for i, l := range byAgent[agent1] {
|
||||
require.False(t, l.DisconnectTime.Valid, "expected log to be ongoing")
|
||||
require.True(t, l.ConnectTime.After(since) || l.ConnectTime.Equal(since), "expected log to be within window")
|
||||
if i > 0 {
|
||||
require.True(t, byAgent[agent1][i-1].ConnectTime.After(l.ConnectTime) || byAgent[agent1][i-1].ConnectTime.Equal(l.ConnectTime), "expected logs to be ordered by connect_time desc")
|
||||
}
|
||||
}
|
||||
|
||||
// Agent2 should include its single active log.
|
||||
require.Equal(t, []uuid.UUID{agent2Log.ID}, []uuid.UUID{byAgent[agent2][0].ID})
|
||||
}
|
||||
|
||||
func TestGetOngoingAgentConnectionsLast24h_PortForwarding(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
|
||||
org := dbfake.Organization(t, db).Do()
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
tpl := dbgen.Template(t, db, database.Template{OrganizationID: org.Org.ID, CreatedBy: user.ID})
|
||||
ws := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
TemplateID: tpl.ID,
|
||||
Name: "ws-pf",
|
||||
})
|
||||
|
||||
now := dbtime.Now()
|
||||
since := now.Add(-24 * time.Hour)
|
||||
|
||||
const agentName = "agent-pf"
|
||||
|
||||
// Agent-reported: NULL user_agent, included unconditionally.
|
||||
agentReported := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: now.Add(-10 * time.Minute),
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agentName,
|
||||
Type: database.ConnectionTypePortForwarding,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
SlugOrPort: sql.NullString{String: "8080", Valid: true},
|
||||
Ip: database.ParseIP("fd7a:115c:a1e0:4353:89d9:4ca8:9c42:8d2d"),
|
||||
})
|
||||
|
||||
// Stale proxy-reported: non-NULL user_agent, bumped but older than AppActiveSince.
|
||||
// Use a non-localhost IP to verify the fix works even behind a reverse proxy.
|
||||
staleConnID := uuid.New()
|
||||
staleConnectTime := now.Add(-15 * time.Minute)
|
||||
_ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
|
||||
Time: staleConnectTime,
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agentName,
|
||||
Type: database.ConnectionTypePortForwarding,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: staleConnID, Valid: true},
|
||||
SlugOrPort: sql.NullString{String: "3000", Valid: true},
|
||||
Ip: database.ParseIP("203.0.113.45"),
|
||||
UserAgent: sql.NullString{String: "Mozilla/5.0", Valid: true},
|
||||
})
|
||||
|
||||
// Bump updated_at to simulate a proxy refresh.
|
||||
staleBumpTime := now.Add(-8 * time.Minute)
|
||||
_, err := db.UpsertConnectionLog(ctx, database.UpsertConnectionLogParams{
|
||||
ID: uuid.New(),
|
||||
Time: staleBumpTime,
|
||||
OrganizationID: ws.OrganizationID,
|
||||
WorkspaceOwnerID: ws.OwnerID,
|
||||
WorkspaceID: ws.ID,
|
||||
WorkspaceName: ws.Name,
|
||||
AgentName: agentName,
|
||||
Type: database.ConnectionTypePortForwarding,
|
||||
ConnectionStatus: database.ConnectionStatusConnected,
|
||||
ConnectionID: uuid.NullUUID{UUID: staleConnID, Valid: true},
|
||||
SlugOrPort: sql.NullString{String: "3000", Valid: true},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
appActiveSince := now.Add(-5 * time.Minute)
|
||||
|
||||
logs, err := db.GetOngoingAgentConnectionsLast24h(ctx, database.GetOngoingAgentConnectionsLast24hParams{
|
||||
WorkspaceIds: []uuid.UUID{ws.ID},
|
||||
AgentNames: []string{agentName},
|
||||
Types: []database.ConnectionType{database.ConnectionTypePortForwarding},
|
||||
Since: since,
|
||||
PerAgentLimit: 50,
|
||||
AppActiveSince: appActiveSince,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Only the agent-reported connection should appear.
|
||||
require.Len(t, logs, 1)
|
||||
require.Equal(t, agentReported.ID, logs[0].ID)
|
||||
require.Equal(t, database.ConnectionTypePortForwarding, logs[0].Type)
|
||||
require.True(t, logs[0].SlugOrPort.Valid)
|
||||
require.Equal(t, "8080", logs[0].SlugOrPort.String)
|
||||
}
|
||||
@@ -3,3 +3,12 @@ package database
|
||||
import "github.com/google/uuid"
|
||||
|
||||
var PrebuildsSystemUserID = uuid.MustParse("c42fdf75-3097-471c-8c33-fb52454d81c0")
|
||||
|
||||
const (
|
||||
TailnetPeeringEventTypeAddedTunnel = "added_tunnel"
|
||||
TailnetPeeringEventTypeRemovedTunnel = "removed_tunnel"
|
||||
TailnetPeeringEventTypePeerUpdateNode = "peer_update_node"
|
||||
TailnetPeeringEventTypePeerUpdateDisconnected = "peer_update_disconnected"
|
||||
TailnetPeeringEventTypePeerUpdateLost = "peer_update_lost"
|
||||
TailnetPeeringEventTypePeerUpdateReadyForHandshake = "peer_update_ready_for_handshake"
|
||||
)
|
||||
|
||||
@@ -849,6 +849,10 @@ func ConnectionLogConnectionTypeFromAgentProtoConnectionType(typ agentproto.Conn
|
||||
return database.ConnectionTypeVscode, nil
|
||||
case agentproto.Connection_RECONNECTING_PTY:
|
||||
return database.ConnectionTypeReconnectingPty, nil
|
||||
case agentproto.Connection_WORKSPACE_APP:
|
||||
return database.ConnectionTypeWorkspaceApp, nil
|
||||
case agentproto.Connection_PORT_FORWARDING:
|
||||
return database.ConnectionTypePortForwarding, nil
|
||||
default:
|
||||
// Also Connection_TYPE_UNSPECIFIED, no mapping.
|
||||
return "", xerrors.Errorf("unknown agent connection type %q", typ)
|
||||
|
||||
@@ -412,7 +412,7 @@ var (
|
||||
ByOrgID: map[string]rbac.OrgPermissions{
|
||||
orgID.String(): {
|
||||
Member: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionCreateAgent, policy.ActionDeleteAgent, policy.ActionUpdateAgent},
|
||||
}),
|
||||
},
|
||||
},
|
||||
@@ -442,7 +442,7 @@ var (
|
||||
rbac.ResourceProvisionerDaemon.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate},
|
||||
rbac.ResourceUser.Type: rbac.ResourceUser.AvailableActions(),
|
||||
rbac.ResourceWorkspaceDormant.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStop},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionSSH, policy.ActionCreateAgent, policy.ActionDeleteAgent},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionSSH, policy.ActionCreateAgent, policy.ActionDeleteAgent, policy.ActionUpdateAgent},
|
||||
rbac.ResourceWorkspaceProxy.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceDeploymentConfig.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceNotificationMessage.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
@@ -461,6 +461,24 @@ var (
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
|
||||
subjectTailnetCoordinator = rbac.Subject{
|
||||
Type: rbac.SubjectTypeTailnetCoordinator,
|
||||
FriendlyName: "Tailnet Coordinator",
|
||||
ID: uuid.Nil.String(),
|
||||
Roles: rbac.Roles([]rbac.Role{
|
||||
{
|
||||
Identifier: rbac.RoleIdentifier{Name: "tailnetcoordinator"},
|
||||
DisplayName: "Tailnet Coordinator",
|
||||
Site: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceTailnetCoordinator.Type: {policy.WildcardSymbol},
|
||||
}),
|
||||
User: []rbac.Permission{},
|
||||
ByOrgID: map[string]rbac.OrgPermissions{},
|
||||
},
|
||||
}),
|
||||
Scope: rbac.ScopeAll,
|
||||
}.WithCachedASTValue()
|
||||
|
||||
subjectSystemOAuth2 = rbac.Subject{
|
||||
Type: rbac.SubjectTypeSystemOAuth,
|
||||
FriendlyName: "System OAuth2",
|
||||
@@ -726,6 +744,12 @@ func AsSystemRestricted(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectSystemRestricted)
|
||||
}
|
||||
|
||||
// AsTailnetCoordinator returns a context with an actor that has permissions
|
||||
// required for tailnet coordinator operations.
|
||||
func AsTailnetCoordinator(ctx context.Context) context.Context {
|
||||
return As(ctx, subjectTailnetCoordinator)
|
||||
}
|
||||
|
||||
// AsSystemOAuth2 returns a context with an actor that has permissions
|
||||
// required for OAuth2 provider operations (token revocation, device codes, registration).
|
||||
func AsSystemOAuth2(ctx context.Context) context.Context {
|
||||
@@ -1588,6 +1612,20 @@ func (q *querier) CleanTailnetTunnels(ctx context.Context) error {
|
||||
return q.db.CleanTailnetTunnels(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) CloseConnectionLogsAndCreateSessions(ctx context.Context, arg database.CloseConnectionLogsAndCreateSessionsParams) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return q.db.CloseConnectionLogsAndCreateSessions(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CloseOpenAgentConnectionLogsForWorkspace(ctx context.Context, arg database.CloseOpenAgentConnectionLogsForWorkspaceParams) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return q.db.CloseOpenAgentConnectionLogsForWorkspace(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CountAIBridgeInterceptions(ctx context.Context, arg database.CountAIBridgeInterceptionsParams) (int64, error) {
|
||||
prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceAibridgeInterception.Type)
|
||||
if err != nil {
|
||||
@@ -1623,6 +1661,13 @@ func (q *querier) CountConnectionLogs(ctx context.Context, arg database.CountCon
|
||||
return q.db.CountAuthorizedConnectionLogs(ctx, arg, prep)
|
||||
}
|
||||
|
||||
func (q *querier) CountGlobalWorkspaceSessions(ctx context.Context, arg database.CountGlobalWorkspaceSessionsParams) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return q.db.CountGlobalWorkspaceSessions(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return nil, err
|
||||
@@ -1644,6 +1689,13 @@ func (q *querier) CountUnreadInboxNotificationsByUserID(ctx context.Context, use
|
||||
return q.db.CountUnreadInboxNotificationsByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) CountWorkspaceSessions(ctx context.Context, arg database.CountWorkspaceSessionsParams) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return q.db.CountWorkspaceSessions(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) CreateUserSecret(ctx context.Context, arg database.CreateUserSecretParams) (database.UserSecret, error) {
|
||||
obj := rbac.ResourceUserSecret.WithOwner(arg.UserID.String())
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, obj); err != nil {
|
||||
@@ -1703,13 +1755,6 @@ func (q *querier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, u
|
||||
return q.db.DeleteApplicationConnectAPIKeysByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceCryptoKey); err != nil {
|
||||
return database.CryptoKey{}, err
|
||||
@@ -1932,14 +1977,14 @@ func (q *querier) DeleteTailnetTunnel(ctx context.Context, arg database.DeleteTa
|
||||
return q.db.DeleteTailnetTunnel(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (q *querier) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
task, err := q.db.GetTaskByID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
return database.TaskTable{}, err
|
||||
return uuid.UUID{}, err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, task.RBACObject()); err != nil {
|
||||
return database.TaskTable{}, err
|
||||
return uuid.UUID{}, err
|
||||
}
|
||||
|
||||
return q.db.DeleteTask(ctx, arg)
|
||||
@@ -2125,6 +2170,13 @@ func (q *querier) FindMatchingPresetID(ctx context.Context, arg database.FindMat
|
||||
return q.db.FindMatchingPresetID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) FindOrCreateSessionForDisconnect(ctx context.Context, arg database.FindOrCreateSessionForDisconnectParams) (interface{}, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.FindOrCreateSessionForDisconnect(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetAIBridgeInterceptionByID(ctx context.Context, id uuid.UUID) (database.AIBridgeInterception, error) {
|
||||
return fetch(q.log, q.auth, q.db.GetAIBridgeInterceptionByID)(ctx, id)
|
||||
}
|
||||
@@ -2209,6 +2261,13 @@ func (q *querier) GetAllTailnetCoordinators(ctx context.Context) ([]database.Tai
|
||||
return q.db.GetAllTailnetCoordinators(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetAllTailnetPeeringEventsByPeerID(ctx context.Context, srcPeerID uuid.NullUUID) ([]database.TailnetPeeringEvent, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetAllTailnetPeeringEventsByPeerID(ctx, srcPeerID)
|
||||
}
|
||||
|
||||
func (q *querier) GetAllTailnetPeers(ctx context.Context) ([]database.TailnetPeer, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
@@ -2223,6 +2282,13 @@ func (q *querier) GetAllTailnetTunnels(ctx context.Context) ([]database.TailnetT
|
||||
return q.db.GetAllTailnetTunnels(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return database.GetAndResetBoundaryUsageSummaryRow{}, err
|
||||
}
|
||||
return q.db.GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
func (q *querier) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
// No authz checks
|
||||
return q.db.GetAnnouncementBanners(ctx)
|
||||
@@ -2271,11 +2337,18 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
|
||||
return q.db.GetAuthorizationUserRoles(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return database.GetBoundaryUsageSummaryRow{}, err
|
||||
func (q *querier) GetConnectionLogByConnectionID(ctx context.Context, arg database.GetConnectionLogByConnectionIDParams) (database.ConnectionLog, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return database.ConnectionLog{}, err
|
||||
}
|
||||
return q.db.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
return q.db.GetConnectionLogByConnectionID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetConnectionLogsBySessionIDs(ctx context.Context, sessionIDs []uuid.UUID) ([]database.ConnectionLog, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetConnectionLogsBySessionIDs(ctx, sessionIDs)
|
||||
}
|
||||
|
||||
func (q *querier) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
@@ -2453,6 +2526,13 @@ func (q *querier) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (database.
|
||||
return fetchWithAction(q.log, q.auth, policy.ActionReadPersonal, q.db.GetGitSSHKey)(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetGlobalWorkspaceSessionsOffset(ctx context.Context, arg database.GetGlobalWorkspaceSessionsOffsetParams) ([]database.GetGlobalWorkspaceSessionsOffsetRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetGlobalWorkspaceSessionsOffset(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetGroupByID(ctx context.Context, id uuid.UUID) (database.Group, error) {
|
||||
return fetch(q.log, q.auth, q.db.GetGroupByID)(ctx, id)
|
||||
}
|
||||
@@ -2719,6 +2799,15 @@ func (q *querier) GetOAuthSigningKey(ctx context.Context) (string, error) {
|
||||
return q.db.GetOAuthSigningKey(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetOngoingAgentConnectionsLast24h(ctx context.Context, arg database.GetOngoingAgentConnectionsLast24hParams) ([]database.GetOngoingAgentConnectionsLast24hRow, error) {
|
||||
// This is a system-level read; authorization comes from the
|
||||
// caller using dbauthz.AsSystemRestricted(ctx).
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetOngoingAgentConnectionsLast24h(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetOrganizationByID(ctx context.Context, id uuid.UUID) (database.Organization, error) {
|
||||
return fetch(q.log, q.auth, q.db.GetOrganizationByID)(ctx, id)
|
||||
}
|
||||
@@ -3088,6 +3177,13 @@ func (q *querier) GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.U
|
||||
return q.db.GetTailnetTunnelPeerBindings(ctx, srcID)
|
||||
}
|
||||
|
||||
func (q *querier) GetTailnetTunnelPeerBindingsByDstID(ctx context.Context, dstID uuid.UUID) ([]database.GetTailnetTunnelPeerBindingsByDstIDRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetTailnetTunnelPeerBindingsByDstID(ctx, dstID)
|
||||
}
|
||||
|
||||
func (q *querier) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]database.GetTailnetTunnelPeerIDsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return nil, err
|
||||
@@ -3893,6 +3989,14 @@ func (q *querier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Conte
|
||||
return q.db.GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
// Verify access to the resource first.
|
||||
if _, err := q.GetWorkspaceResourceByID(ctx, id); err != nil {
|
||||
return database.GetWorkspaceBuildMetricsByResourceIDRow{}, err
|
||||
}
|
||||
return q.db.GetWorkspaceBuildMetricsByResourceID(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
// Authorized call to get the workspace build. If we can read the build,
|
||||
// we can read the params.
|
||||
@@ -4085,6 +4189,13 @@ func (q *querier) GetWorkspaceResourcesCreatedAfter(ctx context.Context, created
|
||||
return q.db.GetWorkspaceResourcesCreatedAfter(ctx, createdAt)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceSessionsOffset(ctx context.Context, arg database.GetWorkspaceSessionsOffsetParams) ([]database.GetWorkspaceSessionsOffsetRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetWorkspaceSessionsOffset(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx context.Context, templateIDs []uuid.UUID) ([]database.GetWorkspaceUniqueOwnerCountByTemplateIDsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
@@ -4399,6 +4510,13 @@ func (q *querier) InsertReplica(ctx context.Context, arg database.InsertReplicaP
|
||||
return q.db.InsertReplica(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTailnetPeeringEvent(ctx context.Context, arg database.InsertTailnetPeeringEventParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.InsertTailnetPeeringEvent(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertTask(ctx context.Context, arg database.InsertTaskParams) (database.TaskTable, error) {
|
||||
// Ensure the actor can access the specified template version (and thus its template).
|
||||
if _, err := q.GetTemplateVersionByID(ctx, arg.TemplateVersionID); err != nil {
|
||||
@@ -4891,13 +5009,6 @@ func (q *querier) RemoveUserFromGroups(ctx context.Context, arg database.RemoveU
|
||||
return q.db.RemoveUserFromGroups(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.ResetBoundaryUsageStats(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
@@ -4954,6 +5065,13 @@ func (q *querier) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKe
|
||||
return update(q.log, q.auth, fetch, q.db.UpdateAPIKeyByID)(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateConnectionLogSessionID(ctx context.Context, arg database.UpdateConnectionLogSessionIDParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.UpdateConnectionLogSessionID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceCryptoKey); err != nil {
|
||||
return database.CryptoKey{}, err
|
||||
@@ -5726,6 +5844,19 @@ func (q *querier) UpdateWorkspaceAgentConnectionByID(ctx context.Context, arg da
|
||||
return q.db.UpdateWorkspaceAgentConnectionByID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateWorkspaceAgentDisplayAppsByID(ctx context.Context, arg database.UpdateWorkspaceAgentDisplayAppsByIDParams) error {
|
||||
workspace, err := q.db.GetWorkspaceByAgentID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdateAgent, workspace); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return q.db.UpdateWorkspaceAgentDisplayAppsByID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg database.UpdateWorkspaceAgentLifecycleStateByIDParams) error {
|
||||
workspace, err := q.db.GetWorkspaceByAgentID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
@@ -6195,9 +6326,9 @@ func (q *querier) UpsertWorkspaceApp(ctx context.Context, arg database.UpsertWor
|
||||
return q.db.UpsertWorkspaceApp(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (bool, error) {
|
||||
func (q *querier) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (database.UpsertWorkspaceAppAuditSessionRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return false, err
|
||||
return database.UpsertWorkspaceAppAuditSessionRow{}, err
|
||||
}
|
||||
return q.db.UpsertWorkspaceAppAuditSession(ctx, arg)
|
||||
}
|
||||
|
||||
@@ -277,11 +277,6 @@ func (s *MethodTestSuite) TestAPIKey() {
|
||||
dbm.EXPECT().DeleteApplicationConnectAPIKeysByUserID(gomock.Any(), a.UserID).Return(nil).AnyTimes()
|
||||
check.Args(a.UserID).Asserts(rbac.ResourceApiKey.WithOwner(a.UserID.String()), policy.ActionDelete).Returns()
|
||||
}))
|
||||
s.Run("DeleteBoundaryUsageStatsByReplicaID", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
replicaID := uuid.New()
|
||||
dbm.EXPECT().DeleteBoundaryUsageStatsByReplicaID(gomock.Any(), replicaID).Return(nil).AnyTimes()
|
||||
check.Args(replicaID).Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteExternalAuthLink", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
a := testutil.Fake(s.T(), faker, database.ExternalAuthLink{})
|
||||
dbm.EXPECT().GetExternalAuthLink(gomock.Any(), database.GetExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Return(a, nil).AnyTimes()
|
||||
@@ -367,6 +362,11 @@ func (s *MethodTestSuite) TestConnectionLogs() {
|
||||
dbm.EXPECT().DeleteOldConnectionLogs(gomock.Any(), database.DeleteOldConnectionLogsParams{}).Return(int64(0), nil).AnyTimes()
|
||||
check.Args(database.DeleteOldConnectionLogsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("CloseOpenAgentConnectionLogsForWorkspace", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
arg := database.CloseOpenAgentConnectionLogsForWorkspaceParams{}
|
||||
dbm.EXPECT().CloseOpenAgentConnectionLogsForWorkspace(gomock.Any(), arg).Return(int64(0), nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceConnectionLog, policy.ActionUpdate)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestFile() {
|
||||
@@ -532,9 +532,9 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
dbm.EXPECT().RemoveUserFromGroups(gomock.Any(), arg).Return(slice.New(g1.ID, g2.ID), nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(slice.New(g1.ID, g2.ID))
|
||||
}))
|
||||
s.Run("ResetBoundaryUsageStats", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().ResetBoundaryUsageStats(gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
s.Run("GetAndResetBoundaryUsageSummary", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetAndResetBoundaryUsageSummary(gomock.Any(), int64(1000)).Return(database.GetAndResetBoundaryUsageSummaryRow{}, nil).AnyTimes()
|
||||
check.Args(int64(1000)).Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("UpdateGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
@@ -1929,6 +1929,17 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().UpdateWorkspaceAgentStartupByID(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionUpdate).Returns()
|
||||
}))
|
||||
s.Run("UpdateWorkspaceAgentDisplayAppsByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
w := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
agt := testutil.Fake(s.T(), faker, database.WorkspaceAgent{})
|
||||
arg := database.UpdateWorkspaceAgentDisplayAppsByIDParams{
|
||||
ID: agt.ID,
|
||||
DisplayApps: []database.DisplayApp{database.DisplayAppVscode},
|
||||
}
|
||||
dbm.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agt.ID).Return(w, nil).AnyTimes()
|
||||
dbm.EXPECT().UpdateWorkspaceAgentDisplayAppsByID(gomock.Any(), arg).Return(nil).AnyTimes()
|
||||
check.Args(arg).Asserts(w, policy.ActionUpdateAgent).Returns()
|
||||
}))
|
||||
s.Run("GetWorkspaceAgentLogsAfter", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
agt := testutil.Fake(s.T(), faker, database.WorkspaceAgent{})
|
||||
@@ -2030,6 +2041,18 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), build.WorkspaceID).Return(ws, nil).AnyTimes()
|
||||
check.Args(res.ID).Asserts(ws, policy.ActionRead).Returns(res)
|
||||
}))
|
||||
s.Run("GetWorkspaceBuildMetricsByResourceID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
build := testutil.Fake(s.T(), faker, database.WorkspaceBuild{WorkspaceID: ws.ID})
|
||||
job := testutil.Fake(s.T(), faker, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
|
||||
res := testutil.Fake(s.T(), faker, database.WorkspaceResource{JobID: build.JobID})
|
||||
dbm.EXPECT().GetWorkspaceResourceByID(gomock.Any(), res.ID).Return(res, nil).AnyTimes()
|
||||
dbm.EXPECT().GetProvisionerJobByID(gomock.Any(), res.JobID).Return(job, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), res.JobID).Return(build, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), build.WorkspaceID).Return(ws, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), res.ID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{}, nil).AnyTimes()
|
||||
check.Args(res.ID).Asserts(ws, policy.ActionRead).Returns(database.GetWorkspaceBuildMetricsByResourceIDRow{})
|
||||
}))
|
||||
s.Run("Build/GetWorkspaceResourcesByJobID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
build := testutil.Fake(s.T(), faker, database.WorkspaceBuild{WorkspaceID: ws.ID})
|
||||
@@ -2506,8 +2529,8 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
DeletedAt: dbtime.Now(),
|
||||
}
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().DeleteTask(gomock.Any(), arg).Return(database.TaskTable{}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionDelete).Returns(database.TaskTable{})
|
||||
dbm.EXPECT().DeleteTask(gomock.Any(), arg).Return(task.ID, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionDelete).Returns(task.ID)
|
||||
}))
|
||||
s.Run("InsertTask", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
tpl := testutil.Fake(s.T(), faker, database.Template{})
|
||||
@@ -2823,6 +2846,10 @@ func (s *MethodTestSuite) TestTailnetFunctions() {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetTailnetTunnelPeerBindingsByDstID", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetTailnetTunnelPeerIDs", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(uuid.New()).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionRead)
|
||||
@@ -2980,10 +3007,6 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
dbm.EXPECT().GetAuthorizationUserRoles(gomock.Any(), u.ID).Return(database.GetAuthorizationUserRolesRow{}, nil).AnyTimes()
|
||||
check.Args(u.ID).Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetBoundaryUsageSummary", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetBoundaryUsageSummary(gomock.Any(), int64(1000)).Return(database.GetBoundaryUsageSummaryRow{}, nil).AnyTimes()
|
||||
check.Args(int64(1000)).Asserts(rbac.ResourceBoundaryUsage, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetDERPMeshKey", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetDERPMeshKey(gomock.Any()).Return("testing", nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
@@ -3295,7 +3318,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
agent := testutil.Fake(s.T(), faker, database.WorkspaceAgent{})
|
||||
app := testutil.Fake(s.T(), faker, database.WorkspaceApp{})
|
||||
arg := database.UpsertWorkspaceAppAuditSessionParams{AgentID: agent.ID, AppID: app.ID, UserID: u.ID, Ip: "127.0.0.1"}
|
||||
dbm.EXPECT().UpsertWorkspaceAppAuditSession(gomock.Any(), arg).Return(true, nil).AnyTimes()
|
||||
dbm.EXPECT().UpsertWorkspaceAppAuditSession(gomock.Any(), arg).Return(database.UpsertWorkspaceAppAuditSessionRow{NewOrStale: true}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("InsertWorkspaceAgentScriptTimings", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
|
||||
@@ -35,12 +35,25 @@ import (
|
||||
var errMatchAny = xerrors.New("match any error")
|
||||
|
||||
var skipMethods = map[string]string{
|
||||
"InTx": "Not relevant",
|
||||
"Ping": "Not relevant",
|
||||
"PGLocks": "Not relevant",
|
||||
"Wrappers": "Not relevant",
|
||||
"AcquireLock": "Not relevant",
|
||||
"TryAcquireLock": "Not relevant",
|
||||
|
||||
"InTx": "Not relevant",
|
||||
"Ping": "Not relevant",
|
||||
"PGLocks": "Not relevant",
|
||||
"Wrappers": "Not relevant",
|
||||
"AcquireLock": "Not relevant",
|
||||
"TryAcquireLock": "Not relevant",
|
||||
"GetOngoingAgentConnectionsLast24h": "Hackathon",
|
||||
"InsertTailnetPeeringEvent": "Hackathon",
|
||||
"CloseConnectionLogsAndCreateSessions": "Hackathon",
|
||||
"CountGlobalWorkspaceSessions": "Hackathon",
|
||||
"CountWorkspaceSessions": "Hackathon",
|
||||
"FindOrCreateSessionForDisconnect": "Hackathon",
|
||||
"GetConnectionLogByConnectionID": "Hackathon",
|
||||
"GetConnectionLogsBySessionIDs": "Hackathon",
|
||||
"GetGlobalWorkspaceSessionsOffset": "Hackathon",
|
||||
"GetWorkspaceSessionsOffset": "Hackathon",
|
||||
"UpdateConnectionLogSessionID": "Hackathon",
|
||||
"GetAllTailnetPeeringEventsByPeerID": "Hackathon",
|
||||
}
|
||||
|
||||
// TestMethodTestSuite runs MethodTestSuite.
|
||||
|
||||
@@ -58,6 +58,61 @@ type WorkspaceBuildBuilder struct {
|
||||
jobStatus database.ProvisionerJobStatus
|
||||
taskAppID uuid.UUID
|
||||
taskSeed database.TaskTable
|
||||
|
||||
// Individual timestamp fields for job customization.
|
||||
jobCreatedAt time.Time
|
||||
jobStartedAt time.Time
|
||||
jobUpdatedAt time.Time
|
||||
jobCompletedAt time.Time
|
||||
|
||||
jobError string // Error message for failed jobs
|
||||
jobErrorCode string // Error code for failed jobs
|
||||
}
|
||||
|
||||
// BuilderOption is a functional option for customizing job timestamps
|
||||
// on status methods.
|
||||
type BuilderOption func(*WorkspaceBuildBuilder)
|
||||
|
||||
// WithJobCreatedAt sets the CreatedAt timestamp for the provisioner job.
|
||||
func WithJobCreatedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobCreatedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobStartedAt sets the StartedAt timestamp for the provisioner job.
|
||||
func WithJobStartedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobStartedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobUpdatedAt sets the UpdatedAt timestamp for the provisioner job.
|
||||
func WithJobUpdatedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobUpdatedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobCompletedAt sets the CompletedAt timestamp for the provisioner job.
|
||||
func WithJobCompletedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobCompletedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobError sets the error message for the provisioner job.
|
||||
func WithJobError(msg string) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobError = msg
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobErrorCode sets the error code for the provisioner job.
|
||||
func WithJobErrorCode(code string) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobErrorCode = code
|
||||
}
|
||||
}
|
||||
|
||||
// WorkspaceBuild generates a workspace build for the provided workspace.
|
||||
@@ -141,18 +196,59 @@ func (b WorkspaceBuildBuilder) WithTask(taskSeed database.TaskTable, appSeed *sd
|
||||
})
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Starting() WorkspaceBuildBuilder {
|
||||
// Starting sets the job to running status.
|
||||
func (b WorkspaceBuildBuilder) Starting(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusRunning
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Pending() WorkspaceBuildBuilder {
|
||||
// Pending sets the job to pending status.
|
||||
func (b WorkspaceBuildBuilder) Pending(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusPending
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Canceled() WorkspaceBuildBuilder {
|
||||
// Canceled sets the job to canceled status.
|
||||
func (b WorkspaceBuildBuilder) Canceled(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusCanceled
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Succeeded sets the job to succeeded status.
|
||||
// This is the default status.
|
||||
func (b WorkspaceBuildBuilder) Succeeded(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusSucceeded
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Failed sets the provisioner job to a failed state. Use WithJobError and
|
||||
// WithJobErrorCode options to set the error message and code. If no error
|
||||
// message is provided, "failed" is used as the default.
|
||||
func (b WorkspaceBuildBuilder) Failed(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusFailed
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
if b.jobError == "" {
|
||||
b.jobError = "failed"
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -267,8 +363,8 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
|
||||
job, err := b.db.InsertProvisionerJob(ownerCtx, database.InsertProvisionerJobParams{
|
||||
ID: jobID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
CreatedAt: takeFirstTime(b.jobCreatedAt, b.ws.CreatedAt, dbtime.Now()),
|
||||
UpdatedAt: takeFirstTime(b.jobCreatedAt, b.ws.CreatedAt, dbtime.Now()),
|
||||
OrganizationID: b.ws.OrganizationID,
|
||||
InitiatorID: b.ws.OwnerID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
@@ -291,11 +387,12 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
// might need to do this multiple times if we got a template version
|
||||
// import job as well
|
||||
b.logger.Debug(context.Background(), "looping to acquire provisioner job")
|
||||
startedAt := takeFirstTime(b.jobStartedAt, dbtime.Now())
|
||||
for {
|
||||
j, err := b.db.AcquireProvisionerJob(ownerCtx, database.AcquireProvisionerJobParams{
|
||||
OrganizationID: job.OrganizationID,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: dbtime.Now(),
|
||||
Time: startedAt,
|
||||
Valid: true,
|
||||
},
|
||||
WorkerID: uuid.NullUUID{
|
||||
@@ -311,32 +408,54 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !b.jobUpdatedAt.IsZero() {
|
||||
err = b.db.UpdateProvisionerJobByID(ownerCtx, database.UpdateProvisionerJobByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: b.jobUpdatedAt,
|
||||
})
|
||||
require.NoError(b.t, err, "update job updated_at")
|
||||
}
|
||||
case database.ProvisionerJobStatusCanceled:
|
||||
// Set provisioner job status to 'canceled'
|
||||
b.logger.Debug(context.Background(), "canceling the provisioner job")
|
||||
now := dbtime.Now()
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCancelByID(ownerCtx, database.UpdateProvisionerJobWithCancelByIDParams{
|
||||
ID: jobID,
|
||||
CanceledAt: sql.NullTime{
|
||||
Time: now,
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: now,
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
require.NoError(b.t, err, "cancel job")
|
||||
case database.ProvisionerJobStatusFailed:
|
||||
b.logger.Debug(context.Background(), "failing the provisioner job")
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCompleteByID(ownerCtx, database.UpdateProvisionerJobWithCompleteByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: completedAt,
|
||||
Error: sql.NullString{String: b.jobError, Valid: b.jobError != ""},
|
||||
ErrorCode: sql.NullString{String: b.jobErrorCode, Valid: b.jobErrorCode != ""},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
require.NoError(b.t, err, "fail job")
|
||||
default:
|
||||
// By default, consider jobs in 'succeeded' status
|
||||
b.logger.Debug(context.Background(), "completing the provisioner job")
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCompleteByID(ownerCtx, database.UpdateProvisionerJobWithCompleteByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
UpdatedAt: completedAt,
|
||||
Error: sql.NullString{},
|
||||
ErrorCode: sql.NullString{},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: dbtime.Now(),
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
@@ -751,6 +870,16 @@ func takeFirst[Value comparable](values ...Value) Value {
|
||||
})
|
||||
}
|
||||
|
||||
// takeFirstTime returns the first non-zero time.Time.
|
||||
func takeFirstTime(values ...time.Time) time.Time {
|
||||
for _, v := range values {
|
||||
if !v.IsZero() {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// mustWorkspaceAppByWorkspaceAndBuildAndAppID finds a workspace app by
|
||||
// workspace ID, build number, and app ID. It returns the workspace app
|
||||
// if found, otherwise fails the test.
|
||||
|
||||
@@ -86,18 +86,27 @@ func ConnectionLog(t testing.TB, db database.Store, seed database.UpsertConnecti
|
||||
WorkspaceID: takeFirst(seed.WorkspaceID, uuid.New()),
|
||||
WorkspaceName: takeFirst(seed.WorkspaceName, testutil.GetRandomName(t)),
|
||||
AgentName: takeFirst(seed.AgentName, testutil.GetRandomName(t)),
|
||||
Type: takeFirst(seed.Type, database.ConnectionTypeSsh),
|
||||
AgentID: uuid.NullUUID{
|
||||
UUID: takeFirst(seed.AgentID.UUID, uuid.Nil),
|
||||
Valid: takeFirst(seed.AgentID.Valid, false),
|
||||
},
|
||||
Type: takeFirst(seed.Type, database.ConnectionTypeSsh),
|
||||
Code: sql.NullInt32{
|
||||
Int32: takeFirst(seed.Code.Int32, 0),
|
||||
Valid: takeFirst(seed.Code.Valid, false),
|
||||
},
|
||||
Ip: pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(127, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
},
|
||||
Ip: func() pqtype.Inet {
|
||||
if seed.Ip.Valid {
|
||||
return seed.Ip
|
||||
}
|
||||
return pqtype.Inet{
|
||||
IPNet: net.IPNet{
|
||||
IP: net.IPv4(127, 0, 0, 1),
|
||||
Mask: net.IPv4Mask(255, 255, 255, 255),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
}(),
|
||||
UserAgent: sql.NullString{
|
||||
String: takeFirst(seed.UserAgent.String, ""),
|
||||
Valid: takeFirst(seed.UserAgent.Valid, false),
|
||||
@@ -118,6 +127,18 @@ func ConnectionLog(t testing.TB, db database.Store, seed database.UpsertConnecti
|
||||
String: takeFirst(seed.DisconnectReason.String, ""),
|
||||
Valid: takeFirst(seed.DisconnectReason.Valid, false),
|
||||
},
|
||||
SessionID: uuid.NullUUID{
|
||||
UUID: takeFirst(seed.SessionID.UUID, uuid.Nil),
|
||||
Valid: takeFirst(seed.SessionID.Valid, false),
|
||||
},
|
||||
ClientHostname: sql.NullString{
|
||||
String: takeFirst(seed.ClientHostname.String, ""),
|
||||
Valid: takeFirst(seed.ClientHostname.Valid, false),
|
||||
},
|
||||
ShortDescription: sql.NullString{
|
||||
String: takeFirst(seed.ShortDescription.String, ""),
|
||||
Valid: takeFirst(seed.ShortDescription.Valid, false),
|
||||
},
|
||||
ConnectionStatus: takeFirst(seed.ConnectionStatus, database.ConnectionStatusConnected),
|
||||
})
|
||||
require.NoError(t, err, "insert connection log")
|
||||
|
||||
@@ -231,6 +231,22 @@ func (m queryMetricsStore) CleanTailnetTunnels(ctx context.Context) error {
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CloseConnectionLogsAndCreateSessions(ctx context.Context, arg database.CloseConnectionLogsAndCreateSessionsParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CloseConnectionLogsAndCreateSessions(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CloseConnectionLogsAndCreateSessions").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "CloseConnectionLogsAndCreateSessions").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CloseOpenAgentConnectionLogsForWorkspace(ctx context.Context, arg database.CloseOpenAgentConnectionLogsForWorkspaceParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CloseOpenAgentConnectionLogsForWorkspace(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CloseOpenAgentConnectionLogsForWorkspace").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "CloseOpenAgentConnectionLogsForWorkspace").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountAIBridgeInterceptions(ctx context.Context, arg database.CountAIBridgeInterceptionsParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountAIBridgeInterceptions(ctx, arg)
|
||||
@@ -255,6 +271,14 @@ func (m queryMetricsStore) CountConnectionLogs(ctx context.Context, arg database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountGlobalWorkspaceSessions(ctx context.Context, arg database.CountGlobalWorkspaceSessionsParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountGlobalWorkspaceSessions(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CountGlobalWorkspaceSessions").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "CountGlobalWorkspaceSessions").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountInProgressPrebuilds(ctx)
|
||||
@@ -279,6 +303,14 @@ func (m queryMetricsStore) CountUnreadInboxNotificationsByUserID(ctx context.Con
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountWorkspaceSessions(ctx context.Context, arg database.CountWorkspaceSessionsParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountWorkspaceSessions(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CountWorkspaceSessions").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "CountWorkspaceSessions").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CreateUserSecret(ctx context.Context, arg database.CreateUserSecretParams) (database.UserSecret, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CreateUserSecret(ctx, arg)
|
||||
@@ -335,14 +367,6 @@ func (m queryMetricsStore) DeleteApplicationConnectAPIKeysByUserID(ctx context.C
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
m.queryLatencies.WithLabelValues("DeleteBoundaryUsageStatsByReplicaID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteBoundaryUsageStatsByReplicaID").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteCryptoKey(ctx, arg)
|
||||
@@ -575,7 +599,7 @@ func (m queryMetricsStore) DeleteTailnetTunnel(ctx context.Context, arg database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (m queryMetricsStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteTask(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteTask").Observe(time.Since(start).Seconds())
|
||||
@@ -726,6 +750,14 @@ func (m queryMetricsStore) FindMatchingPresetID(ctx context.Context, arg databas
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) FindOrCreateSessionForDisconnect(ctx context.Context, arg database.FindOrCreateSessionForDisconnectParams) (interface{}, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.FindOrCreateSessionForDisconnect(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("FindOrCreateSessionForDisconnect").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "FindOrCreateSessionForDisconnect").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAIBridgeInterceptionByID(ctx context.Context, id uuid.UUID) (database.AIBridgeInterception, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAIBridgeInterceptionByID(ctx, id)
|
||||
@@ -838,6 +870,14 @@ func (m queryMetricsStore) GetAllTailnetCoordinators(ctx context.Context) ([]dat
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAllTailnetPeeringEventsByPeerID(ctx context.Context, srcPeerID uuid.NullUUID) ([]database.TailnetPeeringEvent, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAllTailnetPeeringEventsByPeerID(ctx, srcPeerID)
|
||||
m.queryLatencies.WithLabelValues("GetAllTailnetPeeringEventsByPeerID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetAllTailnetPeeringEventsByPeerID").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAllTailnetPeers(ctx context.Context) ([]database.TailnetPeer, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAllTailnetPeers(ctx)
|
||||
@@ -854,6 +894,14 @@ func (m queryMetricsStore) GetAllTailnetTunnels(ctx context.Context) ([]database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
m.queryLatencies.WithLabelValues("GetAndResetBoundaryUsageSummary").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetAndResetBoundaryUsageSummary").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAnnouncementBanners(ctx)
|
||||
@@ -902,11 +950,19 @@ func (m queryMetricsStore) GetAuthorizationUserRoles(ctx context.Context, userID
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
func (m queryMetricsStore) GetConnectionLogByConnectionID(ctx context.Context, arg database.GetConnectionLogByConnectionIDParams) (database.ConnectionLog, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
m.queryLatencies.WithLabelValues("GetBoundaryUsageSummary").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetBoundaryUsageSummary").Inc()
|
||||
r0, r1 := m.s.GetConnectionLogByConnectionID(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetConnectionLogByConnectionID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetConnectionLogByConnectionID").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetConnectionLogsBySessionIDs(ctx context.Context, sessionIds []uuid.UUID) ([]database.ConnectionLog, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetConnectionLogsBySessionIDs(ctx, sessionIds)
|
||||
m.queryLatencies.WithLabelValues("GetConnectionLogsBySessionIDs").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetConnectionLogsBySessionIDs").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
@@ -1102,6 +1158,14 @@ func (m queryMetricsStore) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetGlobalWorkspaceSessionsOffset(ctx context.Context, arg database.GetGlobalWorkspaceSessionsOffsetParams) ([]database.GetGlobalWorkspaceSessionsOffsetRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetGlobalWorkspaceSessionsOffset(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetGlobalWorkspaceSessionsOffset").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetGlobalWorkspaceSessionsOffset").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetGroupByID(ctx context.Context, id uuid.UUID) (database.Group, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetGroupByID(ctx, id)
|
||||
@@ -1398,6 +1462,14 @@ func (m queryMetricsStore) GetOAuthSigningKey(ctx context.Context) (string, erro
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetOngoingAgentConnectionsLast24h(ctx context.Context, arg database.GetOngoingAgentConnectionsLast24hParams) ([]database.GetOngoingAgentConnectionsLast24hRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetOngoingAgentConnectionsLast24h(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetOngoingAgentConnectionsLast24h").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetOngoingAgentConnectionsLast24h").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetOrganizationByID(ctx context.Context, id uuid.UUID) (database.Organization, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetOrganizationByID(ctx, id)
|
||||
@@ -1742,6 +1814,14 @@ func (m queryMetricsStore) GetTailnetTunnelPeerBindings(ctx context.Context, src
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTailnetTunnelPeerBindingsByDstID(ctx context.Context, dstID uuid.UUID) ([]database.GetTailnetTunnelPeerBindingsByDstIDRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTailnetTunnelPeerBindingsByDstID(ctx, dstID)
|
||||
m.queryLatencies.WithLabelValues("GetTailnetTunnelPeerBindingsByDstID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetTailnetTunnelPeerBindingsByDstID").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]database.GetTailnetTunnelPeerIDsRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetTailnetTunnelPeerIDs(ctx, srcID)
|
||||
@@ -2414,6 +2494,14 @@ func (m queryMetricsStore) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx cont
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceBuildMetricsByResourceID(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("GetWorkspaceBuildMetricsByResourceID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetWorkspaceBuildMetricsByResourceID").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceBuildParameters(ctx, workspaceBuildID)
|
||||
@@ -2590,6 +2678,14 @@ func (m queryMetricsStore) GetWorkspaceResourcesCreatedAfter(ctx context.Context
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceSessionsOffset(ctx context.Context, arg database.GetWorkspaceSessionsOffsetParams) ([]database.GetWorkspaceSessionsOffsetRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceSessionsOffset(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("GetWorkspaceSessionsOffset").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetWorkspaceSessionsOffset").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx context.Context, templateIds []uuid.UUID) ([]database.GetWorkspaceUniqueOwnerCountByTemplateIDsRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx, templateIds)
|
||||
@@ -2918,6 +3014,14 @@ func (m queryMetricsStore) InsertReplica(ctx context.Context, arg database.Inser
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertTailnetPeeringEvent(ctx context.Context, arg database.InsertTailnetPeeringEventParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.InsertTailnetPeeringEvent(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("InsertTailnetPeeringEvent").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "InsertTailnetPeeringEvent").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertTask(ctx context.Context, arg database.InsertTaskParams) (database.TaskTable, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.InsertTask(ctx, arg)
|
||||
@@ -3334,14 +3438,6 @@ func (m queryMetricsStore) RemoveUserFromGroups(ctx context.Context, arg databas
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.ResetBoundaryUsageStats(ctx)
|
||||
m.queryLatencies.WithLabelValues("ResetBoundaryUsageStats").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "ResetBoundaryUsageStats").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.RevokeDBCryptKey(ctx, activeKeyDigest)
|
||||
@@ -3398,6 +3494,14 @@ func (m queryMetricsStore) UpdateAPIKeyByID(ctx context.Context, arg database.Up
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateConnectionLogSessionID(ctx context.Context, arg database.UpdateConnectionLogSessionIDParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpdateConnectionLogSessionID(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpdateConnectionLogSessionID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpdateConnectionLogSessionID").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpdateCryptoKeyDeletesAt(ctx, arg)
|
||||
@@ -3909,6 +4013,14 @@ func (m queryMetricsStore) UpdateWorkspaceAgentConnectionByID(ctx context.Contex
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateWorkspaceAgentDisplayAppsByID(ctx context.Context, arg database.UpdateWorkspaceAgentDisplayAppsByIDParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpdateWorkspaceAgentDisplayAppsByID(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpdateWorkspaceAgentDisplayAppsByID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "UpdateWorkspaceAgentDisplayAppsByID").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg database.UpdateWorkspaceAgentLifecycleStateByIDParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpdateWorkspaceAgentLifecycleStateByID(ctx, arg)
|
||||
@@ -4285,7 +4397,7 @@ func (m queryMetricsStore) UpsertWorkspaceApp(ctx context.Context, arg database.
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (bool, error) {
|
||||
func (m queryMetricsStore) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (database.UpsertWorkspaceAppAuditSessionRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.UpsertWorkspaceAppAuditSession(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpsertWorkspaceAppAuditSession").Observe(time.Since(start).Seconds())
|
||||
|
||||
@@ -276,6 +276,36 @@ func (mr *MockStoreMockRecorder) CleanTailnetTunnels(ctx any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanTailnetTunnels", reflect.TypeOf((*MockStore)(nil).CleanTailnetTunnels), ctx)
|
||||
}
|
||||
|
||||
// CloseConnectionLogsAndCreateSessions mocks base method.
|
||||
func (m *MockStore) CloseConnectionLogsAndCreateSessions(ctx context.Context, arg database.CloseConnectionLogsAndCreateSessionsParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CloseConnectionLogsAndCreateSessions", ctx, arg)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CloseConnectionLogsAndCreateSessions indicates an expected call of CloseConnectionLogsAndCreateSessions.
|
||||
func (mr *MockStoreMockRecorder) CloseConnectionLogsAndCreateSessions(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseConnectionLogsAndCreateSessions", reflect.TypeOf((*MockStore)(nil).CloseConnectionLogsAndCreateSessions), ctx, arg)
|
||||
}
|
||||
|
||||
// CloseOpenAgentConnectionLogsForWorkspace mocks base method.
|
||||
func (m *MockStore) CloseOpenAgentConnectionLogsForWorkspace(ctx context.Context, arg database.CloseOpenAgentConnectionLogsForWorkspaceParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CloseOpenAgentConnectionLogsForWorkspace", ctx, arg)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CloseOpenAgentConnectionLogsForWorkspace indicates an expected call of CloseOpenAgentConnectionLogsForWorkspace.
|
||||
func (mr *MockStoreMockRecorder) CloseOpenAgentConnectionLogsForWorkspace(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseOpenAgentConnectionLogsForWorkspace", reflect.TypeOf((*MockStore)(nil).CloseOpenAgentConnectionLogsForWorkspace), ctx, arg)
|
||||
}
|
||||
|
||||
// CountAIBridgeInterceptions mocks base method.
|
||||
func (m *MockStore) CountAIBridgeInterceptions(ctx context.Context, arg database.CountAIBridgeInterceptionsParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -366,6 +396,21 @@ func (mr *MockStoreMockRecorder) CountConnectionLogs(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountConnectionLogs", reflect.TypeOf((*MockStore)(nil).CountConnectionLogs), ctx, arg)
|
||||
}
|
||||
|
||||
// CountGlobalWorkspaceSessions mocks base method.
|
||||
func (m *MockStore) CountGlobalWorkspaceSessions(ctx context.Context, arg database.CountGlobalWorkspaceSessionsParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CountGlobalWorkspaceSessions", ctx, arg)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CountGlobalWorkspaceSessions indicates an expected call of CountGlobalWorkspaceSessions.
|
||||
func (mr *MockStoreMockRecorder) CountGlobalWorkspaceSessions(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountGlobalWorkspaceSessions", reflect.TypeOf((*MockStore)(nil).CountGlobalWorkspaceSessions), ctx, arg)
|
||||
}
|
||||
|
||||
// CountInProgressPrebuilds mocks base method.
|
||||
func (m *MockStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -411,6 +456,21 @@ func (mr *MockStoreMockRecorder) CountUnreadInboxNotificationsByUserID(ctx, user
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountUnreadInboxNotificationsByUserID", reflect.TypeOf((*MockStore)(nil).CountUnreadInboxNotificationsByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// CountWorkspaceSessions mocks base method.
|
||||
func (m *MockStore) CountWorkspaceSessions(ctx context.Context, arg database.CountWorkspaceSessionsParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CountWorkspaceSessions", ctx, arg)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CountWorkspaceSessions indicates an expected call of CountWorkspaceSessions.
|
||||
func (mr *MockStoreMockRecorder) CountWorkspaceSessions(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountWorkspaceSessions", reflect.TypeOf((*MockStore)(nil).CountWorkspaceSessions), ctx, arg)
|
||||
}
|
||||
|
||||
// CreateUserSecret mocks base method.
|
||||
func (m *MockStore) CreateUserSecret(ctx context.Context, arg database.CreateUserSecretParams) (database.UserSecret, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -511,20 +571,6 @@ func (mr *MockStoreMockRecorder) DeleteApplicationConnectAPIKeysByUserID(ctx, us
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteApplicationConnectAPIKeysByUserID", reflect.TypeOf((*MockStore)(nil).DeleteApplicationConnectAPIKeysByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteBoundaryUsageStatsByReplicaID mocks base method.
|
||||
func (m *MockStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteBoundaryUsageStatsByReplicaID", ctx, replicaID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteBoundaryUsageStatsByReplicaID indicates an expected call of DeleteBoundaryUsageStatsByReplicaID.
|
||||
func (mr *MockStoreMockRecorder) DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBoundaryUsageStatsByReplicaID", reflect.TypeOf((*MockStore)(nil).DeleteBoundaryUsageStatsByReplicaID), ctx, replicaID)
|
||||
}
|
||||
|
||||
// DeleteCryptoKey mocks base method.
|
||||
func (m *MockStore) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -941,10 +987,10 @@ func (mr *MockStoreMockRecorder) DeleteTailnetTunnel(ctx, arg any) *gomock.Call
|
||||
}
|
||||
|
||||
// DeleteTask mocks base method.
|
||||
func (m *MockStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (m *MockStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTask", ctx, arg)
|
||||
ret0, _ := ret[0].(database.TaskTable)
|
||||
ret0, _ := ret[0].(uuid.UUID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1213,6 +1259,21 @@ func (mr *MockStoreMockRecorder) FindMatchingPresetID(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMatchingPresetID", reflect.TypeOf((*MockStore)(nil).FindMatchingPresetID), ctx, arg)
|
||||
}
|
||||
|
||||
// FindOrCreateSessionForDisconnect mocks base method.
|
||||
func (m *MockStore) FindOrCreateSessionForDisconnect(ctx context.Context, arg database.FindOrCreateSessionForDisconnectParams) (any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "FindOrCreateSessionForDisconnect", ctx, arg)
|
||||
ret0, _ := ret[0].(any)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// FindOrCreateSessionForDisconnect indicates an expected call of FindOrCreateSessionForDisconnect.
|
||||
func (mr *MockStoreMockRecorder) FindOrCreateSessionForDisconnect(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrCreateSessionForDisconnect", reflect.TypeOf((*MockStore)(nil).FindOrCreateSessionForDisconnect), ctx, arg)
|
||||
}
|
||||
|
||||
// GetAIBridgeInterceptionByID mocks base method.
|
||||
func (m *MockStore) GetAIBridgeInterceptionByID(ctx context.Context, id uuid.UUID) (database.AIBridgeInterception, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1423,6 +1484,21 @@ func (mr *MockStoreMockRecorder) GetAllTailnetCoordinators(ctx any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllTailnetCoordinators", reflect.TypeOf((*MockStore)(nil).GetAllTailnetCoordinators), ctx)
|
||||
}
|
||||
|
||||
// GetAllTailnetPeeringEventsByPeerID mocks base method.
|
||||
func (m *MockStore) GetAllTailnetPeeringEventsByPeerID(ctx context.Context, srcPeerID uuid.NullUUID) ([]database.TailnetPeeringEvent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAllTailnetPeeringEventsByPeerID", ctx, srcPeerID)
|
||||
ret0, _ := ret[0].([]database.TailnetPeeringEvent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetAllTailnetPeeringEventsByPeerID indicates an expected call of GetAllTailnetPeeringEventsByPeerID.
|
||||
func (mr *MockStoreMockRecorder) GetAllTailnetPeeringEventsByPeerID(ctx, srcPeerID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllTailnetPeeringEventsByPeerID", reflect.TypeOf((*MockStore)(nil).GetAllTailnetPeeringEventsByPeerID), ctx, srcPeerID)
|
||||
}
|
||||
|
||||
// GetAllTailnetPeers mocks base method.
|
||||
func (m *MockStore) GetAllTailnetPeers(ctx context.Context) ([]database.TailnetPeer, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1453,6 +1529,21 @@ func (mr *MockStoreMockRecorder) GetAllTailnetTunnels(ctx any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllTailnetTunnels", reflect.TypeOf((*MockStore)(nil).GetAllTailnetTunnels), ctx)
|
||||
}
|
||||
|
||||
// GetAndResetBoundaryUsageSummary mocks base method.
|
||||
func (m *MockStore) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAndResetBoundaryUsageSummary", ctx, maxStalenessMs)
|
||||
ret0, _ := ret[0].(database.GetAndResetBoundaryUsageSummaryRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetAndResetBoundaryUsageSummary indicates an expected call of GetAndResetBoundaryUsageSummary.
|
||||
func (mr *MockStoreMockRecorder) GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAndResetBoundaryUsageSummary", reflect.TypeOf((*MockStore)(nil).GetAndResetBoundaryUsageSummary), ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
// GetAnnouncementBanners mocks base method.
|
||||
func (m *MockStore) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1648,19 +1739,34 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspacesAndAgentsByOwnerID(ctx,
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspacesAndAgentsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspacesAndAgentsByOwnerID), ctx, ownerID, prepared)
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary mocks base method.
|
||||
func (m *MockStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
// GetConnectionLogByConnectionID mocks base method.
|
||||
func (m *MockStore) GetConnectionLogByConnectionID(ctx context.Context, arg database.GetConnectionLogByConnectionIDParams) (database.ConnectionLog, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetBoundaryUsageSummary", ctx, maxStalenessMs)
|
||||
ret0, _ := ret[0].(database.GetBoundaryUsageSummaryRow)
|
||||
ret := m.ctrl.Call(m, "GetConnectionLogByConnectionID", ctx, arg)
|
||||
ret0, _ := ret[0].(database.ConnectionLog)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary indicates an expected call of GetBoundaryUsageSummary.
|
||||
func (mr *MockStoreMockRecorder) GetBoundaryUsageSummary(ctx, maxStalenessMs any) *gomock.Call {
|
||||
// GetConnectionLogByConnectionID indicates an expected call of GetConnectionLogByConnectionID.
|
||||
func (mr *MockStoreMockRecorder) GetConnectionLogByConnectionID(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBoundaryUsageSummary", reflect.TypeOf((*MockStore)(nil).GetBoundaryUsageSummary), ctx, maxStalenessMs)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionLogByConnectionID", reflect.TypeOf((*MockStore)(nil).GetConnectionLogByConnectionID), ctx, arg)
|
||||
}
|
||||
|
||||
// GetConnectionLogsBySessionIDs mocks base method.
|
||||
func (m *MockStore) GetConnectionLogsBySessionIDs(ctx context.Context, sessionIds []uuid.UUID) ([]database.ConnectionLog, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetConnectionLogsBySessionIDs", ctx, sessionIds)
|
||||
ret0, _ := ret[0].([]database.ConnectionLog)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetConnectionLogsBySessionIDs indicates an expected call of GetConnectionLogsBySessionIDs.
|
||||
func (mr *MockStoreMockRecorder) GetConnectionLogsBySessionIDs(ctx, sessionIds any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionLogsBySessionIDs", reflect.TypeOf((*MockStore)(nil).GetConnectionLogsBySessionIDs), ctx, sessionIds)
|
||||
}
|
||||
|
||||
// GetConnectionLogsOffset mocks base method.
|
||||
@@ -2023,6 +2129,21 @@ func (mr *MockStoreMockRecorder) GetGitSSHKey(ctx, userID any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGitSSHKey", reflect.TypeOf((*MockStore)(nil).GetGitSSHKey), ctx, userID)
|
||||
}
|
||||
|
||||
// GetGlobalWorkspaceSessionsOffset mocks base method.
|
||||
func (m *MockStore) GetGlobalWorkspaceSessionsOffset(ctx context.Context, arg database.GetGlobalWorkspaceSessionsOffsetParams) ([]database.GetGlobalWorkspaceSessionsOffsetRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetGlobalWorkspaceSessionsOffset", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.GetGlobalWorkspaceSessionsOffsetRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetGlobalWorkspaceSessionsOffset indicates an expected call of GetGlobalWorkspaceSessionsOffset.
|
||||
func (mr *MockStoreMockRecorder) GetGlobalWorkspaceSessionsOffset(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGlobalWorkspaceSessionsOffset", reflect.TypeOf((*MockStore)(nil).GetGlobalWorkspaceSessionsOffset), ctx, arg)
|
||||
}
|
||||
|
||||
// GetGroupByID mocks base method.
|
||||
func (m *MockStore) GetGroupByID(ctx context.Context, id uuid.UUID) (database.Group, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -2578,6 +2699,21 @@ func (mr *MockStoreMockRecorder) GetOAuthSigningKey(ctx any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOAuthSigningKey", reflect.TypeOf((*MockStore)(nil).GetOAuthSigningKey), ctx)
|
||||
}
|
||||
|
||||
// GetOngoingAgentConnectionsLast24h mocks base method.
|
||||
func (m *MockStore) GetOngoingAgentConnectionsLast24h(ctx context.Context, arg database.GetOngoingAgentConnectionsLast24hParams) ([]database.GetOngoingAgentConnectionsLast24hRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetOngoingAgentConnectionsLast24h", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.GetOngoingAgentConnectionsLast24hRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetOngoingAgentConnectionsLast24h indicates an expected call of GetOngoingAgentConnectionsLast24h.
|
||||
func (mr *MockStoreMockRecorder) GetOngoingAgentConnectionsLast24h(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOngoingAgentConnectionsLast24h", reflect.TypeOf((*MockStore)(nil).GetOngoingAgentConnectionsLast24h), ctx, arg)
|
||||
}
|
||||
|
||||
// GetOrganizationByID mocks base method.
|
||||
func (m *MockStore) GetOrganizationByID(ctx context.Context, id uuid.UUID) (database.Organization, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -3223,6 +3359,21 @@ func (mr *MockStoreMockRecorder) GetTailnetTunnelPeerBindings(ctx, srcID any) *g
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTailnetTunnelPeerBindings", reflect.TypeOf((*MockStore)(nil).GetTailnetTunnelPeerBindings), ctx, srcID)
|
||||
}
|
||||
|
||||
// GetTailnetTunnelPeerBindingsByDstID mocks base method.
|
||||
func (m *MockStore) GetTailnetTunnelPeerBindingsByDstID(ctx context.Context, dstID uuid.UUID) ([]database.GetTailnetTunnelPeerBindingsByDstIDRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTailnetTunnelPeerBindingsByDstID", ctx, dstID)
|
||||
ret0, _ := ret[0].([]database.GetTailnetTunnelPeerBindingsByDstIDRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTailnetTunnelPeerBindingsByDstID indicates an expected call of GetTailnetTunnelPeerBindingsByDstID.
|
||||
func (mr *MockStoreMockRecorder) GetTailnetTunnelPeerBindingsByDstID(ctx, dstID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTailnetTunnelPeerBindingsByDstID", reflect.TypeOf((*MockStore)(nil).GetTailnetTunnelPeerBindingsByDstID), ctx, dstID)
|
||||
}
|
||||
|
||||
// GetTailnetTunnelPeerIDs mocks base method.
|
||||
func (m *MockStore) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]database.GetTailnetTunnelPeerIDsRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -4513,6 +4664,21 @@ func (mr *MockStoreMockRecorder) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ct
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceBuildByWorkspaceIDAndBuildNumber", reflect.TypeOf((*MockStore)(nil).GetWorkspaceBuildByWorkspaceIDAndBuildNumber), ctx, arg)
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildMetricsByResourceID mocks base method.
|
||||
func (m *MockStore) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetWorkspaceBuildMetricsByResourceID", ctx, id)
|
||||
ret0, _ := ret[0].(database.GetWorkspaceBuildMetricsByResourceIDRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildMetricsByResourceID indicates an expected call of GetWorkspaceBuildMetricsByResourceID.
|
||||
func (mr *MockStoreMockRecorder) GetWorkspaceBuildMetricsByResourceID(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceBuildMetricsByResourceID", reflect.TypeOf((*MockStore)(nil).GetWorkspaceBuildMetricsByResourceID), ctx, id)
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildParameters mocks base method.
|
||||
func (m *MockStore) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -4843,6 +5009,21 @@ func (mr *MockStoreMockRecorder) GetWorkspaceResourcesCreatedAfter(ctx, createdA
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceResourcesCreatedAfter", reflect.TypeOf((*MockStore)(nil).GetWorkspaceResourcesCreatedAfter), ctx, createdAt)
|
||||
}
|
||||
|
||||
// GetWorkspaceSessionsOffset mocks base method.
|
||||
func (m *MockStore) GetWorkspaceSessionsOffset(ctx context.Context, arg database.GetWorkspaceSessionsOffsetParams) ([]database.GetWorkspaceSessionsOffsetRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetWorkspaceSessionsOffset", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.GetWorkspaceSessionsOffsetRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetWorkspaceSessionsOffset indicates an expected call of GetWorkspaceSessionsOffset.
|
||||
func (mr *MockStoreMockRecorder) GetWorkspaceSessionsOffset(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceSessionsOffset", reflect.TypeOf((*MockStore)(nil).GetWorkspaceSessionsOffset), ctx, arg)
|
||||
}
|
||||
|
||||
// GetWorkspaceUniqueOwnerCountByTemplateIDs mocks base method.
|
||||
func (m *MockStore) GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx context.Context, templateIds []uuid.UUID) ([]database.GetWorkspaceUniqueOwnerCountByTemplateIDsRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -5468,6 +5649,20 @@ func (mr *MockStoreMockRecorder) InsertReplica(ctx, arg any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertReplica", reflect.TypeOf((*MockStore)(nil).InsertReplica), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertTailnetPeeringEvent mocks base method.
|
||||
func (m *MockStore) InsertTailnetPeeringEvent(ctx context.Context, arg database.InsertTailnetPeeringEventParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InsertTailnetPeeringEvent", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// InsertTailnetPeeringEvent indicates an expected call of InsertTailnetPeeringEvent.
|
||||
func (mr *MockStoreMockRecorder) InsertTailnetPeeringEvent(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertTailnetPeeringEvent", reflect.TypeOf((*MockStore)(nil).InsertTailnetPeeringEvent), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertTask mocks base method.
|
||||
func (m *MockStore) InsertTask(ctx context.Context, arg database.InsertTaskParams) (database.TaskTable, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6278,20 +6473,6 @@ func (mr *MockStoreMockRecorder) RemoveUserFromGroups(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroups", reflect.TypeOf((*MockStore)(nil).RemoveUserFromGroups), ctx, arg)
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats mocks base method.
|
||||
func (m *MockStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResetBoundaryUsageStats", ctx)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats indicates an expected call of ResetBoundaryUsageStats.
|
||||
func (mr *MockStoreMockRecorder) ResetBoundaryUsageStats(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBoundaryUsageStats", reflect.TypeOf((*MockStore)(nil).ResetBoundaryUsageStats), ctx)
|
||||
}
|
||||
|
||||
// RevokeDBCryptKey mocks base method.
|
||||
func (m *MockStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6393,6 +6574,20 @@ func (mr *MockStoreMockRecorder) UpdateAPIKeyByID(ctx, arg any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAPIKeyByID", reflect.TypeOf((*MockStore)(nil).UpdateAPIKeyByID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateConnectionLogSessionID mocks base method.
|
||||
func (m *MockStore) UpdateConnectionLogSessionID(ctx context.Context, arg database.UpdateConnectionLogSessionIDParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateConnectionLogSessionID", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpdateConnectionLogSessionID indicates an expected call of UpdateConnectionLogSessionID.
|
||||
func (mr *MockStoreMockRecorder) UpdateConnectionLogSessionID(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConnectionLogSessionID", reflect.TypeOf((*MockStore)(nil).UpdateConnectionLogSessionID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateCryptoKeyDeletesAt mocks base method.
|
||||
func (m *MockStore) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -7321,6 +7516,20 @@ func (mr *MockStoreMockRecorder) UpdateWorkspaceAgentConnectionByID(ctx, arg any
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkspaceAgentConnectionByID", reflect.TypeOf((*MockStore)(nil).UpdateWorkspaceAgentConnectionByID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateWorkspaceAgentDisplayAppsByID mocks base method.
|
||||
func (m *MockStore) UpdateWorkspaceAgentDisplayAppsByID(ctx context.Context, arg database.UpdateWorkspaceAgentDisplayAppsByIDParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateWorkspaceAgentDisplayAppsByID", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpdateWorkspaceAgentDisplayAppsByID indicates an expected call of UpdateWorkspaceAgentDisplayAppsByID.
|
||||
func (mr *MockStoreMockRecorder) UpdateWorkspaceAgentDisplayAppsByID(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkspaceAgentDisplayAppsByID", reflect.TypeOf((*MockStore)(nil).UpdateWorkspaceAgentDisplayAppsByID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateWorkspaceAgentLifecycleStateByID mocks base method.
|
||||
func (m *MockStore) UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg database.UpdateWorkspaceAgentLifecycleStateByIDParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -7992,10 +8201,10 @@ func (mr *MockStoreMockRecorder) UpsertWorkspaceApp(ctx, arg any) *gomock.Call {
|
||||
}
|
||||
|
||||
// UpsertWorkspaceAppAuditSession mocks base method.
|
||||
func (m *MockStore) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (bool, error) {
|
||||
func (m *MockStore) UpsertWorkspaceAppAuditSession(ctx context.Context, arg database.UpsertWorkspaceAppAuditSessionParams) (database.UpsertWorkspaceAppAuditSessionRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpsertWorkspaceAppAuditSession", ctx, arg)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret0, _ := ret[0].(database.UpsertWorkspaceAppAuditSessionRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
Generated
+69
-6
@@ -208,7 +208,9 @@ CREATE TYPE api_key_scope AS ENUM (
|
||||
'boundary_usage:*',
|
||||
'boundary_usage:delete',
|
||||
'boundary_usage:read',
|
||||
'boundary_usage:update'
|
||||
'boundary_usage:update',
|
||||
'workspace:update_agent',
|
||||
'workspace_dormant:update_agent'
|
||||
);
|
||||
|
||||
CREATE TYPE app_sharing_level AS ENUM (
|
||||
@@ -269,7 +271,8 @@ CREATE TYPE connection_type AS ENUM (
|
||||
'jetbrains',
|
||||
'reconnecting_pty',
|
||||
'workspace_app',
|
||||
'port_forwarding'
|
||||
'port_forwarding',
|
||||
'system'
|
||||
);
|
||||
|
||||
CREATE TYPE cors_behavior AS ENUM (
|
||||
@@ -1013,6 +1016,11 @@ BEGIN
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE TABLE agent_peering_ids (
|
||||
agent_id uuid NOT NULL,
|
||||
peering_id bytea NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE aibridge_interceptions (
|
||||
id uuid NOT NULL,
|
||||
initiator_id uuid NOT NULL,
|
||||
@@ -1157,7 +1165,13 @@ CREATE TABLE connection_logs (
|
||||
slug_or_port text,
|
||||
connection_id uuid,
|
||||
disconnect_time timestamp with time zone,
|
||||
disconnect_reason text
|
||||
disconnect_reason text,
|
||||
agent_id uuid,
|
||||
updated_at timestamp with time zone DEFAULT now() NOT NULL,
|
||||
session_id uuid,
|
||||
client_hostname text,
|
||||
short_description text,
|
||||
os text
|
||||
);
|
||||
|
||||
COMMENT ON COLUMN connection_logs.code IS 'Either the HTTP status code of the web request, or the exit code of an SSH connection. For non-web connections, this is Null until we receive a disconnect event for the same connection_id.';
|
||||
@@ -1174,6 +1188,8 @@ COMMENT ON COLUMN connection_logs.disconnect_time IS 'The time the connection wa
|
||||
|
||||
COMMENT ON COLUMN connection_logs.disconnect_reason IS 'The reason the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id.';
|
||||
|
||||
COMMENT ON COLUMN connection_logs.updated_at IS 'Last time this connection log was confirmed active. For agent connections, equals connect_time. For web connections, bumped while the session is active.';
|
||||
|
||||
CREATE TABLE crypto_keys (
|
||||
feature crypto_key_feature NOT NULL,
|
||||
sequence integer NOT NULL,
|
||||
@@ -1769,6 +1785,15 @@ CREATE UNLOGGED TABLE tailnet_coordinators (
|
||||
|
||||
COMMENT ON TABLE tailnet_coordinators IS 'We keep this separate from replicas in case we need to break the coordinator out into its own service';
|
||||
|
||||
CREATE TABLE tailnet_peering_events (
|
||||
peering_id bytea NOT NULL,
|
||||
event_type text NOT NULL,
|
||||
src_peer_id uuid,
|
||||
dst_peer_id uuid,
|
||||
node bytea,
|
||||
occurred_at timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
CREATE UNLOGGED TABLE tailnet_peers (
|
||||
id uuid NOT NULL,
|
||||
coordinator_id uuid NOT NULL,
|
||||
@@ -2002,7 +2027,7 @@ CREATE VIEW tasks_with_status AS
|
||||
WHEN (latest_build_raw.job_status IS NULL) THEN 'pending'::task_status
|
||||
WHEN (latest_build_raw.job_status = ANY (ARRAY['failed'::provisioner_job_status, 'canceling'::provisioner_job_status, 'canceled'::provisioner_job_status])) THEN 'error'::task_status
|
||||
WHEN ((latest_build_raw.transition = ANY (ARRAY['stop'::workspace_transition, 'delete'::workspace_transition])) AND (latest_build_raw.job_status = 'succeeded'::provisioner_job_status)) THEN 'paused'::task_status
|
||||
WHEN ((latest_build_raw.transition = 'start'::workspace_transition) AND (latest_build_raw.job_status = 'pending'::provisioner_job_status)) THEN 'initializing'::task_status
|
||||
WHEN ((latest_build_raw.transition = 'start'::workspace_transition) AND (latest_build_raw.job_status = 'pending'::provisioner_job_status)) THEN 'pending'::task_status
|
||||
WHEN ((latest_build_raw.transition = 'start'::workspace_transition) AND (latest_build_raw.job_status = ANY (ARRAY['running'::provisioner_job_status, 'succeeded'::provisioner_job_status]))) THEN 'active'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status) build_status)
|
||||
@@ -2288,7 +2313,8 @@ CREATE TABLE templates (
|
||||
activity_bump bigint DEFAULT '3600000000000'::bigint NOT NULL,
|
||||
max_port_sharing_level app_sharing_level DEFAULT 'owner'::app_sharing_level NOT NULL,
|
||||
use_classic_parameter_flow boolean DEFAULT false NOT NULL,
|
||||
cors_behavior cors_behavior DEFAULT 'simple'::cors_behavior NOT NULL
|
||||
cors_behavior cors_behavior DEFAULT 'simple'::cors_behavior NOT NULL,
|
||||
disable_module_cache boolean DEFAULT false NOT NULL
|
||||
);
|
||||
|
||||
COMMENT ON COLUMN templates.default_ttl IS 'The default duration for autostop for workspaces created from this template.';
|
||||
@@ -2342,6 +2368,7 @@ CREATE VIEW template_with_names AS
|
||||
templates.max_port_sharing_level,
|
||||
templates.use_classic_parameter_flow,
|
||||
templates.cors_behavior,
|
||||
templates.disable_module_cache,
|
||||
COALESCE(visible_users.avatar_url, ''::text) AS created_by_avatar_url,
|
||||
COALESCE(visible_users.username, ''::text) AS created_by_username,
|
||||
COALESCE(visible_users.name, ''::text) AS created_by_name,
|
||||
@@ -2600,7 +2627,8 @@ CREATE UNLOGGED TABLE workspace_app_audit_sessions (
|
||||
status_code integer NOT NULL,
|
||||
started_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
id uuid NOT NULL
|
||||
id uuid NOT NULL,
|
||||
connection_id uuid
|
||||
);
|
||||
|
||||
COMMENT ON TABLE workspace_app_audit_sessions IS 'Audit sessions for workspace apps, the data in this table is ephemeral and is used to deduplicate audit log entries for workspace apps. While a session is active, the same data will not be logged again. This table does not store historical data.';
|
||||
@@ -2894,6 +2922,18 @@ CREATE SEQUENCE workspace_resource_metadata_id_seq
|
||||
|
||||
ALTER SEQUENCE workspace_resource_metadata_id_seq OWNED BY workspace_resource_metadata.id;
|
||||
|
||||
CREATE TABLE workspace_sessions (
|
||||
id uuid DEFAULT gen_random_uuid() NOT NULL,
|
||||
workspace_id uuid NOT NULL,
|
||||
agent_id uuid,
|
||||
ip inet,
|
||||
client_hostname text,
|
||||
short_description text,
|
||||
started_at timestamp with time zone NOT NULL,
|
||||
ended_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
|
||||
CREATE VIEW workspaces_expanded AS
|
||||
SELECT workspaces.id,
|
||||
workspaces.created_at,
|
||||
@@ -2951,6 +2991,9 @@ ALTER TABLE ONLY workspace_proxies ALTER COLUMN region_id SET DEFAULT nextval('w
|
||||
|
||||
ALTER TABLE ONLY workspace_resource_metadata ALTER COLUMN id SET DEFAULT nextval('workspace_resource_metadata_id_seq'::regclass);
|
||||
|
||||
ALTER TABLE ONLY agent_peering_ids
|
||||
ADD CONSTRAINT agent_peering_ids_pkey PRIMARY KEY (agent_id, peering_id);
|
||||
|
||||
ALTER TABLE ONLY workspace_agent_stats
|
||||
ADD CONSTRAINT agent_stats_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -3257,6 +3300,9 @@ ALTER TABLE ONLY workspace_resource_metadata
|
||||
ALTER TABLE ONLY workspace_resources
|
||||
ADD CONSTRAINT workspace_resources_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY workspace_sessions
|
||||
ADD CONSTRAINT workspace_sessions_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY workspaces
|
||||
ADD CONSTRAINT workspaces_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -3308,6 +3354,8 @@ COMMENT ON INDEX idx_connection_logs_connection_id_workspace_id_agent_name IS 'C
|
||||
|
||||
CREATE INDEX idx_connection_logs_organization_id ON connection_logs USING btree (organization_id);
|
||||
|
||||
CREATE INDEX idx_connection_logs_session ON connection_logs USING btree (session_id) WHERE (session_id IS NOT NULL);
|
||||
|
||||
CREATE INDEX idx_connection_logs_workspace_id ON connection_logs USING btree (workspace_id);
|
||||
|
||||
CREATE INDEX idx_connection_logs_workspace_owner_id ON connection_logs USING btree (workspace_owner_id);
|
||||
@@ -3362,6 +3410,12 @@ CREATE INDEX idx_workspace_app_statuses_workspace_id_created_at ON workspace_app
|
||||
|
||||
CREATE INDEX idx_workspace_builds_initiator_id ON workspace_builds USING btree (initiator_id);
|
||||
|
||||
CREATE INDEX idx_workspace_sessions_hostname_lookup ON workspace_sessions USING btree (workspace_id, client_hostname, started_at) WHERE (client_hostname IS NOT NULL);
|
||||
|
||||
CREATE INDEX idx_workspace_sessions_ip_lookup ON workspace_sessions USING btree (workspace_id, ip, started_at) WHERE ((ip IS NOT NULL) AND (client_hostname IS NULL));
|
||||
|
||||
CREATE INDEX idx_workspace_sessions_workspace ON workspace_sessions USING btree (workspace_id, started_at DESC);
|
||||
|
||||
CREATE UNIQUE INDEX notification_messages_dedupe_hash_idx ON notification_messages USING btree (dedupe_hash);
|
||||
|
||||
CREATE UNIQUE INDEX organizations_single_default_org ON organizations USING btree (is_default) WHERE (is_default = true);
|
||||
@@ -3549,6 +3603,9 @@ ALTER TABLE ONLY api_keys
|
||||
ALTER TABLE ONLY connection_logs
|
||||
ADD CONSTRAINT connection_logs_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY connection_logs
|
||||
ADD CONSTRAINT connection_logs_session_id_fkey FOREIGN KEY (session_id) REFERENCES workspace_sessions(id) ON DELETE SET NULL;
|
||||
|
||||
ALTER TABLE ONLY connection_logs
|
||||
ADD CONSTRAINT connection_logs_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
|
||||
|
||||
@@ -3822,6 +3879,12 @@ ALTER TABLE ONLY workspace_resource_metadata
|
||||
ALTER TABLE ONLY workspace_resources
|
||||
ADD CONSTRAINT workspace_resources_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_sessions
|
||||
ADD CONSTRAINT workspace_sessions_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE SET NULL;
|
||||
|
||||
ALTER TABLE ONLY workspace_sessions
|
||||
ADD CONSTRAINT workspace_sessions_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspaces
|
||||
ADD CONSTRAINT workspaces_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE RESTRICT;
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ const (
|
||||
ForeignKeyAibridgeInterceptionsInitiatorID ForeignKeyConstraint = "aibridge_interceptions_initiator_id_fkey" // ALTER TABLE ONLY aibridge_interceptions ADD CONSTRAINT aibridge_interceptions_initiator_id_fkey FOREIGN KEY (initiator_id) REFERENCES users(id);
|
||||
ForeignKeyAPIKeysUserIDUUID ForeignKeyConstraint = "api_keys_user_id_uuid_fkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyConnectionLogsOrganizationID ForeignKeyConstraint = "connection_logs_organization_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
ForeignKeyConnectionLogsSessionID ForeignKeyConstraint = "connection_logs_session_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_session_id_fkey FOREIGN KEY (session_id) REFERENCES workspace_sessions(id) ON DELETE SET NULL;
|
||||
ForeignKeyConnectionLogsWorkspaceID ForeignKeyConstraint = "connection_logs_workspace_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
|
||||
ForeignKeyConnectionLogsWorkspaceOwnerID ForeignKeyConstraint = "connection_logs_workspace_owner_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_workspace_owner_id_fkey FOREIGN KEY (workspace_owner_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyCryptoKeysSecretKeyID ForeignKeyConstraint = "crypto_keys_secret_key_id_fkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_secret_key_id_fkey FOREIGN KEY (secret_key_id) REFERENCES dbcrypt_keys(active_key_digest);
|
||||
@@ -100,6 +101,8 @@ const (
|
||||
ForeignKeyWorkspaceModulesJobID ForeignKeyConstraint = "workspace_modules_job_id_fkey" // ALTER TABLE ONLY workspace_modules ADD CONSTRAINT workspace_modules_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceResourceMetadataWorkspaceResourceID ForeignKeyConstraint = "workspace_resource_metadata_workspace_resource_id_fkey" // ALTER TABLE ONLY workspace_resource_metadata ADD CONSTRAINT workspace_resource_metadata_workspace_resource_id_fkey FOREIGN KEY (workspace_resource_id) REFERENCES workspace_resources(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceResourcesJobID ForeignKeyConstraint = "workspace_resources_job_id_fkey" // ALTER TABLE ONLY workspace_resources ADD CONSTRAINT workspace_resources_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspaceSessionsAgentID ForeignKeyConstraint = "workspace_sessions_agent_id_fkey" // ALTER TABLE ONLY workspace_sessions ADD CONSTRAINT workspace_sessions_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE SET NULL;
|
||||
ForeignKeyWorkspaceSessionsWorkspaceID ForeignKeyConstraint = "workspace_sessions_workspace_id_fkey" // ALTER TABLE ONLY workspace_sessions ADD CONSTRAINT workspace_sessions_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
|
||||
ForeignKeyWorkspacesOrganizationID ForeignKeyConstraint = "workspaces_organization_id_fkey" // ALTER TABLE ONLY workspaces ADD CONSTRAINT workspaces_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE RESTRICT;
|
||||
ForeignKeyWorkspacesOwnerID ForeignKeyConstraint = "workspaces_owner_id_fkey" // ALTER TABLE ONLY workspaces ADD CONSTRAINT workspaces_owner_id_fkey FOREIGN KEY (owner_id) REFERENCES users(id) ON DELETE RESTRICT;
|
||||
ForeignKeyWorkspacesTemplateID ForeignKeyConstraint = "workspaces_template_id_fkey" // ALTER TABLE ONLY workspaces ADD CONSTRAINT workspaces_template_id_fkey FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE RESTRICT;
|
||||
|
||||
@@ -14,6 +14,7 @@ const (
|
||||
LockIDCryptoKeyRotation
|
||||
LockIDReconcilePrebuilds
|
||||
LockIDReconcileSystemRoles
|
||||
LockIDBoundaryUsageStats
|
||||
)
|
||||
|
||||
// GenLockID generates a unique and consistent lock ID from a given string.
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
-- No-op for update agent scopes: keep enum values to avoid dependency churn.
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'workspace:update_agent';
|
||||
ALTER TYPE api_key_scope ADD VALUE IF NOT EXISTS 'workspace_dormant:update_agent';
|
||||
@@ -0,0 +1,142 @@
|
||||
-- Update task status in view.
|
||||
DROP VIEW IF EXISTS tasks_with_status;
|
||||
|
||||
CREATE VIEW
|
||||
tasks_with_status
|
||||
AS
|
||||
SELECT
|
||||
tasks.*,
|
||||
-- Combine component statuses with precedence: build -> agent -> app.
|
||||
CASE
|
||||
WHEN tasks.workspace_id IS NULL THEN 'pending'::task_status
|
||||
WHEN build_status.status != 'active' THEN build_status.status::task_status
|
||||
WHEN agent_status.status != 'active' THEN agent_status.status::task_status
|
||||
ELSE app_status.status::task_status
|
||||
END AS status,
|
||||
-- Attach debug information for troubleshooting status.
|
||||
jsonb_build_object(
|
||||
'build', jsonb_build_object(
|
||||
'transition', latest_build_raw.transition,
|
||||
'job_status', latest_build_raw.job_status,
|
||||
'computed', build_status.status
|
||||
),
|
||||
'agent', jsonb_build_object(
|
||||
'lifecycle_state', agent_raw.lifecycle_state,
|
||||
'computed', agent_status.status
|
||||
),
|
||||
'app', jsonb_build_object(
|
||||
'health', app_raw.health,
|
||||
'computed', app_status.status
|
||||
)
|
||||
) AS status_debug,
|
||||
task_app.*,
|
||||
agent_raw.lifecycle_state AS workspace_agent_lifecycle_state,
|
||||
app_raw.health AS workspace_app_health,
|
||||
task_owner.*
|
||||
FROM
|
||||
tasks
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
vu.username AS owner_username,
|
||||
vu.name AS owner_name,
|
||||
vu.avatar_url AS owner_avatar_url
|
||||
FROM
|
||||
visible_users vu
|
||||
WHERE
|
||||
vu.id = tasks.owner_id
|
||||
) task_owner
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
task_app.workspace_build_number,
|
||||
task_app.workspace_agent_id,
|
||||
task_app.workspace_app_id
|
||||
FROM
|
||||
task_workspace_apps task_app
|
||||
WHERE
|
||||
task_id = tasks.id
|
||||
ORDER BY
|
||||
task_app.workspace_build_number DESC
|
||||
LIMIT 1
|
||||
) task_app ON TRUE
|
||||
|
||||
-- Join the raw data for computing task status.
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
workspace_build.transition,
|
||||
provisioner_job.job_status,
|
||||
workspace_build.job_id
|
||||
FROM
|
||||
workspace_builds workspace_build
|
||||
JOIN
|
||||
provisioner_jobs provisioner_job
|
||||
ON provisioner_job.id = workspace_build.job_id
|
||||
WHERE
|
||||
workspace_build.workspace_id = tasks.workspace_id
|
||||
AND workspace_build.build_number = task_app.workspace_build_number
|
||||
) latest_build_raw ON TRUE
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
workspace_agent.lifecycle_state
|
||||
FROM
|
||||
workspace_agents workspace_agent
|
||||
WHERE
|
||||
workspace_agent.id = task_app.workspace_agent_id
|
||||
) agent_raw ON TRUE
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
workspace_app.health
|
||||
FROM
|
||||
workspace_apps workspace_app
|
||||
WHERE
|
||||
workspace_app.id = task_app.workspace_app_id
|
||||
) app_raw ON TRUE
|
||||
|
||||
-- Compute the status for each component.
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
CASE
|
||||
WHEN latest_build_raw.job_status IS NULL THEN 'pending'::task_status
|
||||
WHEN latest_build_raw.job_status IN ('failed', 'canceling', 'canceled') THEN 'error'::task_status
|
||||
WHEN
|
||||
latest_build_raw.transition IN ('stop', 'delete')
|
||||
AND latest_build_raw.job_status = 'succeeded' THEN 'paused'::task_status
|
||||
WHEN
|
||||
latest_build_raw.transition = 'start'
|
||||
AND latest_build_raw.job_status = 'pending' THEN 'initializing'::task_status
|
||||
-- Build is running or done, defer to agent/app status.
|
||||
WHEN
|
||||
latest_build_raw.transition = 'start'
|
||||
AND latest_build_raw.job_status IN ('running', 'succeeded') THEN 'active'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status
|
||||
) build_status
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
CASE
|
||||
-- No agent or connecting.
|
||||
WHEN
|
||||
agent_raw.lifecycle_state IS NULL
|
||||
OR agent_raw.lifecycle_state IN ('created', 'starting') THEN 'initializing'::task_status
|
||||
-- Agent is running, defer to app status.
|
||||
-- NOTE(mafredri): The start_error/start_timeout states means connected, but some startup script failed.
|
||||
-- This may or may not affect the task status but this has to be caught by app health check.
|
||||
WHEN agent_raw.lifecycle_state IN ('ready', 'start_timeout', 'start_error') THEN 'active'::task_status
|
||||
-- If the agent is shutting down or turned off, this is an unknown state because we would expect a stop
|
||||
-- build to be running.
|
||||
-- This is essentially equal to: `IN ('shutting_down', 'shutdown_timeout', 'shutdown_error', 'off')`,
|
||||
-- but we cannot use them because the values were added in a migration.
|
||||
WHEN agent_raw.lifecycle_state NOT IN ('created', 'starting', 'ready', 'start_timeout', 'start_error') THEN 'unknown'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status
|
||||
) agent_status
|
||||
CROSS JOIN LATERAL (
|
||||
SELECT
|
||||
CASE
|
||||
WHEN app_raw.health = 'initializing' THEN 'initializing'::task_status
|
||||
WHEN app_raw.health = 'unhealthy' THEN 'error'::task_status
|
||||
WHEN app_raw.health IN ('healthy', 'disabled') THEN 'active'::task_status
|
||||
ELSE 'unknown'::task_status
|
||||
END AS status
|
||||
) app_status
|
||||
WHERE
|
||||
tasks.deleted_at IS NULL;
|
||||
@@ -0,0 +1,145 @@
|
||||
-- Fix task status logic: pending provisioner job should give pending task status, not initializing.
-- A task is pending when the provisioner hasn't picked up the job yet.
-- A task is initializing when the provisioner is actively running the job.
DROP VIEW IF EXISTS tasks_with_status;

CREATE VIEW
    tasks_with_status
AS
SELECT
    tasks.*,
    -- Combine component statuses with precedence: build -> agent -> app.
    -- A task with no workspace yet is always pending; otherwise the first
    -- non-'active' component status wins, falling through to the app status.
    CASE
        WHEN tasks.workspace_id IS NULL THEN 'pending'::task_status
        WHEN build_status.status != 'active' THEN build_status.status::task_status
        WHEN agent_status.status != 'active' THEN agent_status.status::task_status
        ELSE app_status.status::task_status
    END AS status,
    -- Attach debug information for troubleshooting status.
    jsonb_build_object(
        'build', jsonb_build_object(
            'transition', latest_build_raw.transition,
            'job_status', latest_build_raw.job_status,
            'computed', build_status.status
        ),
        'agent', jsonb_build_object(
            'lifecycle_state', agent_raw.lifecycle_state,
            'computed', agent_status.status
        ),
        'app', jsonb_build_object(
            'health', app_raw.health,
            'computed', app_status.status
        )
    ) AS status_debug,
    task_app.*,
    agent_raw.lifecycle_state AS workspace_agent_lifecycle_state,
    app_raw.health AS workspace_app_health,
    task_owner.*
FROM
    tasks
    -- Owner display fields; CROSS JOIN is safe because every task has an owner
    -- row in visible_users (presumably enforced by FK — verify against schema).
    CROSS JOIN LATERAL (
        SELECT
            vu.username AS owner_username,
            vu.name AS owner_name,
            vu.avatar_url AS owner_avatar_url
        FROM
            visible_users vu
        WHERE
            vu.id = tasks.owner_id
    ) task_owner
    -- Latest app association for the task (highest workspace_build_number).
    -- LEFT JOIN: a task may not have any workspace app rows yet.
    LEFT JOIN LATERAL (
        SELECT
            task_app.workspace_build_number,
            task_app.workspace_agent_id,
            task_app.workspace_app_id
        FROM
            task_workspace_apps task_app
        WHERE
            task_id = tasks.id
        ORDER BY
            task_app.workspace_build_number DESC
        LIMIT 1
    ) task_app ON TRUE

    -- Join the raw data for computing task status.
    LEFT JOIN LATERAL (
        SELECT
            workspace_build.transition,
            provisioner_job.job_status,
            workspace_build.job_id
        FROM
            workspace_builds workspace_build
        JOIN
            provisioner_jobs provisioner_job
        ON provisioner_job.id = workspace_build.job_id
        WHERE
            workspace_build.workspace_id = tasks.workspace_id
            AND workspace_build.build_number = task_app.workspace_build_number
    ) latest_build_raw ON TRUE
    LEFT JOIN LATERAL (
        SELECT
            workspace_agent.lifecycle_state
        FROM
            workspace_agents workspace_agent
        WHERE
            workspace_agent.id = task_app.workspace_agent_id
    ) agent_raw ON TRUE
    LEFT JOIN LATERAL (
        SELECT
            workspace_app.health
        FROM
            workspace_apps workspace_app
        WHERE
            workspace_app.id = task_app.workspace_app_id
    ) app_raw ON TRUE

    -- Compute the status for each component.
    CROSS JOIN LATERAL (
        SELECT
            CASE
                -- No build/job found for the task's workspace yet.
                WHEN latest_build_raw.job_status IS NULL THEN 'pending'::task_status
                WHEN latest_build_raw.job_status IN ('failed', 'canceling', 'canceled') THEN 'error'::task_status
                WHEN
                    latest_build_raw.transition IN ('stop', 'delete')
                    AND latest_build_raw.job_status = 'succeeded' THEN 'paused'::task_status
                -- Job is pending (not picked up by provisioner yet).
                WHEN
                    latest_build_raw.transition = 'start'
                    AND latest_build_raw.job_status = 'pending' THEN 'pending'::task_status
                -- Job is running or done, defer to agent/app status.
                WHEN
                    latest_build_raw.transition = 'start'
                    AND latest_build_raw.job_status IN ('running', 'succeeded') THEN 'active'::task_status
                ELSE 'unknown'::task_status
            END AS status
    ) build_status
    CROSS JOIN LATERAL (
        SELECT
            CASE
                -- No agent or connecting.
                WHEN
                    agent_raw.lifecycle_state IS NULL
                    OR agent_raw.lifecycle_state IN ('created', 'starting') THEN 'initializing'::task_status
                -- Agent is running, defer to app status.
                -- NOTE(mafredri): The start_error/start_timeout states means connected, but some startup script failed.
                -- This may or may not affect the task status but this has to be caught by app health check.
                WHEN agent_raw.lifecycle_state IN ('ready', 'start_timeout', 'start_error') THEN 'active'::task_status
                -- If the agent is shutting down or turned off, this is an unknown state because we would expect a stop
                -- build to be running.
                -- This is essentially equal to: `IN ('shutting_down', 'shutdown_timeout', 'shutdown_error', 'off')`,
                -- but we cannot use them because the values were added in a migration.
                WHEN agent_raw.lifecycle_state NOT IN ('created', 'starting', 'ready', 'start_timeout', 'start_error') THEN 'unknown'::task_status
                ELSE 'unknown'::task_status
            END AS status
    ) agent_status
    CROSS JOIN LATERAL (
        SELECT
            CASE
                WHEN app_raw.health = 'initializing' THEN 'initializing'::task_status
                WHEN app_raw.health = 'unhealthy' THEN 'error'::task_status
                WHEN app_raw.health IN ('healthy', 'disabled') THEN 'active'::task_status
                ELSE 'unknown'::task_status
            END AS status
    ) app_status
WHERE
    tasks.deleted_at IS NULL;
|
||||
@@ -0,0 +1,16 @@
|
||||
-- The view depends on the templates table, so it must be dropped before the
-- column can be removed and recreated afterwards.
DROP VIEW template_with_names;
ALTER TABLE templates DROP COLUMN disable_module_cache;

CREATE VIEW template_with_names AS
SELECT templates.*,
    COALESCE(visible_users.avatar_url, ''::text) AS created_by_avatar_url,
    COALESCE(visible_users.username, ''::text) AS created_by_username,
    COALESCE(visible_users.name, ''::text) AS created_by_name,
    COALESCE(organizations.name, ''::text) AS organization_name,
    COALESCE(organizations.display_name, ''::text) AS organization_display_name,
    COALESCE(organizations.icon, ''::text) AS organization_icon
FROM ((templates
    LEFT JOIN visible_users ON ((templates.created_by = visible_users.id)))
    LEFT JOIN organizations ON ((templates.organization_id = organizations.id)));

COMMENT ON VIEW template_with_names IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user