Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c15ce7b452 | |||
| 236f0149dd |
@@ -4,10 +4,9 @@
|
||||
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
"runArgs": ["--cap-add=SYS_PTRACE"]
|
||||
// without --priviliged the Github Codespace build fails (not required otherwise)
|
||||
"runArgs": ["--cap-add=SYS_PTRACE", "--privileged"]
|
||||
}
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
# Ignore all files and folders
|
||||
**
|
||||
|
||||
# Include flake.nix and flake.lock
|
||||
!flake.nix
|
||||
!flake.lock
|
||||
@@ -1,5 +0,0 @@
|
||||
# If you would like `git blame` to ignore commits from this file, run...
|
||||
# git config blame.ignoreRevsFile .git-blame-ignore-revs
|
||||
|
||||
# chore: format code with semicolons when using prettier (#9555)
|
||||
988c9af0153561397686c119da9d1336d2433fdd
|
||||
@@ -12,4 +12,3 @@ provisionersdk/proto/*.go linguist-generated=true
|
||||
*.tfstate.dot linguist-generated=true
|
||||
*.tfplan.dot linguist-generated=true
|
||||
site/src/api/typesGenerated.ts linguist-generated=true
|
||||
site/src/pages/SetupPage/countries.tsx linguist-generated=true
|
||||
|
||||
@@ -4,15 +4,61 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.21.5"
|
||||
default: "1.20.7"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Cache go toolchain
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ runner.tool_cache }}/go/${{ inputs.version }}
|
||||
key: gotoolchain-${{ runner.os }}-${{ inputs.version }}
|
||||
restore-keys: |
|
||||
gotoolchain-${{ runner.os }}-
|
||||
|
||||
- name: Setup Go
|
||||
uses: buildjet/setup-go@v4
|
||||
with:
|
||||
# We do our own caching for implementation clarity.
|
||||
cache: false
|
||||
go-version: ${{ inputs.version }}
|
||||
|
||||
- name: Get cache dirs
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
|
||||
|
||||
# We split up GOMODCACHE from GOCACHE because the latter must be invalidated
|
||||
# on code change, but the former can be kept.
|
||||
- name: Cache $GOMODCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOMODCACHE }}
|
||||
key: gomodcache-${{ runner.os }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}
|
||||
# restore-keys aren't used because it causes the cache to grow
|
||||
# infinitely. go.sum changes very infrequently, so rebuilding from
|
||||
# scratch every now and then isn't terrible.
|
||||
|
||||
- name: Cache $GOCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOCACHE }}
|
||||
# Job name must be included in the key for effective test cache reuse.
|
||||
# The key format is intentionally different than GOMODCACHE, because any
|
||||
# time a Go file changes we invalidate this cache, whereas GOMODCACHE is
|
||||
# only invalidated when go.sum changes.
|
||||
# The number in the key is incremented when the cache gets too large,
|
||||
# since this technically grows without bound.
|
||||
key: gocache2-${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/*.go', 'go.**') }}
|
||||
restore-keys: |
|
||||
gocache2-${{ runner.os }}-${{ github.job }}-
|
||||
gocache2-${{ runner.os }}-
|
||||
|
||||
- name: Install gotestsum
|
||||
shell: bash
|
||||
run: go install gotest.tools/gotestsum@latest
|
||||
|
||||
@@ -17,7 +17,7 @@ runs:
|
||||
- name: Setup Node
|
||||
uses: buildjet/setup-node@v3
|
||||
with:
|
||||
node-version: 18.19.0
|
||||
node-version: 18.17.0
|
||||
# See https://github.com/actions/setup-node#caching-global-packages-data
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: ${{ inputs.directory }}/pnpm-lock.yaml
|
||||
|
||||
@@ -5,6 +5,6 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@v4
|
||||
uses: sqlc-dev/setup-sqlc@v3
|
||||
with:
|
||||
sqlc-version: "1.25.0"
|
||||
sqlc-version: "1.20.0"
|
||||
|
||||
@@ -5,7 +5,7 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@v3
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.5.7
|
||||
terraform_version: 1.5.5
|
||||
terraform_wrapper: false
|
||||
|
||||
@@ -20,7 +20,7 @@ runs:
|
||||
echo "No API key provided, skipping..."
|
||||
exit 0
|
||||
fi
|
||||
npm install -g @datadog/datadog-ci@2.21.0
|
||||
npm install -g @datadog/datadog-ci@2.10.0
|
||||
datadog-ci junit upload --service coder ./gotests.xml \
|
||||
--tags os:${{runner.os}} --tags runner_name:${{runner.name}}
|
||||
env:
|
||||
|
||||
+53
-7
@@ -8,7 +8,7 @@ updates:
|
||||
timezone: "America/Chicago"
|
||||
labels: []
|
||||
commit-message:
|
||||
prefix: "ci"
|
||||
prefix: "chore"
|
||||
ignore:
|
||||
# These actions deliver the latest versions by updating the major
|
||||
# release tag, so ignore minor and patch versions
|
||||
@@ -38,12 +38,19 @@ updates:
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
open-pull-requests-limit: 15
|
||||
ignore:
|
||||
# Ignore patch updates for all dependencies
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
groups:
|
||||
otel:
|
||||
patterns:
|
||||
- "go.nhat.io/otelsql"
|
||||
- "go.opentelemetry.io/otel*"
|
||||
golang-x:
|
||||
patterns:
|
||||
- "golang.org/x/*"
|
||||
|
||||
# Update our Dockerfile.
|
||||
- package-ecosystem: "docker"
|
||||
@@ -59,6 +66,10 @@ updates:
|
||||
# We need to coordinate terraform updates with the version hardcoded in
|
||||
# our Go code.
|
||||
- dependency-name: "terraform"
|
||||
groups:
|
||||
scripts-docker:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/site/"
|
||||
@@ -81,11 +92,36 @@ updates:
|
||||
- dependency-name: "@types/node"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
open-pull-requests-limit: 15
|
||||
groups:
|
||||
site:
|
||||
react:
|
||||
patterns:
|
||||
- "*"
|
||||
- "react*"
|
||||
- "@types/react*"
|
||||
xterm:
|
||||
patterns:
|
||||
- "xterm*"
|
||||
xstate:
|
||||
patterns:
|
||||
- "xstate"
|
||||
- "@xstate*"
|
||||
mui:
|
||||
patterns:
|
||||
- "@mui*"
|
||||
storybook:
|
||||
patterns:
|
||||
- "@storybook*"
|
||||
- "storybook*"
|
||||
eslint:
|
||||
patterns:
|
||||
- "eslint*"
|
||||
- "@eslint*"
|
||||
- "@typescript-eslint/eslint-plugin"
|
||||
- "@typescript-eslint/parser"
|
||||
jest:
|
||||
patterns:
|
||||
- "jest*"
|
||||
- "@swc/jest"
|
||||
- "@types/jest"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/offlinedocs/"
|
||||
@@ -108,12 +144,22 @@ updates:
|
||||
- dependency-name: "@types/node"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
|
||||
# Update dogfood.
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/dogfood/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
groups:
|
||||
offlinedocs:
|
||||
dogfood-docker:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
# Update dogfood.
|
||||
- package-ecosystem: "terraform"
|
||||
directory: "/dogfood/"
|
||||
schedule:
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
app = "paris-coder"
|
||||
primary_region = "cdg"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://paris.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.paris.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -1,28 +0,0 @@
|
||||
app = "sao-paulo-coder"
|
||||
primary_region = "gru"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -1,28 +0,0 @@
|
||||
app = "sydney-coder"
|
||||
primary_region = "syd"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sydney.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sydney.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -1,10 +1,12 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
coder = {
|
||||
source = "coder/coder"
|
||||
source = "coder/coder"
|
||||
version = "~> 0.11.0"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
source = "hashicorp/kubernetes"
|
||||
version = "~> 2.22"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -235,9 +237,6 @@ resource "kubernetes_deployment" "main" {
|
||||
"app.kubernetes.io/name" = "coder-workspace"
|
||||
}
|
||||
}
|
||||
strategy {
|
||||
type = "Recreate"
|
||||
}
|
||||
|
||||
template {
|
||||
metadata {
|
||||
|
||||
+181
-311
@@ -31,22 +31,20 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs-only: ${{ steps.filter.outputs.docs_count == steps.filter.outputs.all_count }}
|
||||
docs: ${{ steps.filter.outputs.docs }}
|
||||
go: ${{ steps.filter.outputs.go }}
|
||||
ts: ${{ steps.filter.outputs.ts }}
|
||||
k8s: ${{ steps.filter.outputs.k8s }}
|
||||
ci: ${{ steps.filter.outputs.ci }}
|
||||
db: ${{ steps.filter.outputs.db }}
|
||||
offlinedocs-only: ${{ steps.filter.outputs.offlinedocs_count == steps.filter.outputs.all_count }}
|
||||
offlinedocs: ${{ steps.filter.outputs.offlinedocs }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
# For pull requests it's not necessary to checkout the code
|
||||
- name: check changed files
|
||||
uses: dorny/paths-filter@v3
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
@@ -55,12 +53,10 @@ jobs:
|
||||
docs:
|
||||
- "docs/**"
|
||||
- "README.md"
|
||||
- "examples/templates/**"
|
||||
- "examples/web-server/**"
|
||||
- "examples/monitoring/**"
|
||||
- "examples/lima/**"
|
||||
db:
|
||||
- "**.sql"
|
||||
- "coderd/database/**"
|
||||
go:
|
||||
- "**.sql"
|
||||
- "**.go"
|
||||
@@ -114,7 +110,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -131,7 +127,7 @@ jobs:
|
||||
echo "LINT_CACHE_DIR=$dir" >> $GITHUB_ENV
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: buildjet/cache@v4
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
@@ -141,7 +137,7 @@ jobs:
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@v1.18.0
|
||||
uses: crate-ci/typos@v1.16.4
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
@@ -169,7 +165,7 @@ jobs:
|
||||
if: needs.changes.outputs.docs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -188,17 +184,20 @@ jobs:
|
||||
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go install github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install go.uber.org/mock/mockgen@v0.4.0
|
||||
go install github.com/golang/mock/mockgen@v1.6.0
|
||||
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
mkdir -p /tmp/proto
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
# protoc must be in lockstep with our dogfood Dockerfile or the
|
||||
# version in the comments will differ. This is also defined in
|
||||
# security.yaml
|
||||
set -x
|
||||
cd dogfood
|
||||
DOCKER_BUILDKIT=1 docker build . --target proto -t protoc
|
||||
protoc_path=/usr/local/bin/protoc
|
||||
docker run --rm --entrypoint cat protoc /tmp/bin/protoc > $protoc_path
|
||||
chmod +x $protoc_path
|
||||
protoc --version
|
||||
|
||||
- name: make gen
|
||||
run: "make --output-sync -j -B gen"
|
||||
@@ -213,7 +212,7 @@ jobs:
|
||||
timeout-minutes: 7
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -221,14 +220,14 @@ jobs:
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: buildjet/setup-go@v5
|
||||
uses: buildjet/setup-go@v4
|
||||
with:
|
||||
# This doesn't need caching. It's super fast anyways!
|
||||
cache: false
|
||||
go-version: 1.21.5
|
||||
go-version: 1.20.7
|
||||
|
||||
- name: Install shfmt
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.5.0
|
||||
|
||||
- name: make fmt
|
||||
run: |
|
||||
@@ -239,7 +238,7 @@ jobs:
|
||||
run: ./scripts/check_unstaged.sh
|
||||
|
||||
test-go:
|
||||
runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'buildjet-4vcpu-ubuntu-2204' || matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'macos-latest-xlarge' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'windows-latest-16-cores' || matrix.os }}
|
||||
runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'buildjet-4vcpu-ubuntu-2204' || matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'macos-latest-xl' || matrix.os == 'windows-2019' && github.repository_owner == 'coder' && 'windows-latest-8-cores' || matrix.os }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 20
|
||||
@@ -249,10 +248,10 @@ jobs:
|
||||
os:
|
||||
- ubuntu-latest
|
||||
- macos-latest
|
||||
- windows-2022
|
||||
- windows-2019
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -276,36 +275,29 @@ jobs:
|
||||
echo "cover=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# if macOS, install google-chrome for scaletests. As another concern,
|
||||
# should we really have this kind of external dependency requirement
|
||||
# on standard CI?
|
||||
if [ "${{ matrix.os }}" == "macos-latest" ]; then
|
||||
brew install google-chrome
|
||||
fi
|
||||
|
||||
# By default Go will use the number of logical CPUs, which
|
||||
# is a fine default.
|
||||
PARALLEL_FLAG=""
|
||||
|
||||
# macOS will output "The default interactive shell is now zsh"
|
||||
# intermittently in CI...
|
||||
if [ "${{ matrix.os }}" == "macos-latest" ]; then
|
||||
touch ~/.bash_profile && echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bash_profile
|
||||
fi
|
||||
export TS_DEBUG_DISCO=true
|
||||
gotestsum --junitfile="gotests.xml" --jsonfile="gotests.json" \
|
||||
--packages="./..." -- $PARALLEL_FLAG -short -failfast $COVERAGE_FLAGS
|
||||
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -319,8 +311,7 @@ jobs:
|
||||
|
||||
test-go-pg:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs:
|
||||
- changes
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
# This timeout must be greater than the timeout set by `go test` in
|
||||
# `make test-postgres` to ensure we receive a trace of running
|
||||
@@ -329,7 +320,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -344,16 +335,21 @@ jobs:
|
||||
export TS_DEBUG_DISCO=true
|
||||
make test-postgres
|
||||
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -372,7 +368,7 @@ jobs:
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -387,13 +383,105 @@ jobs:
|
||||
gotestsum --junitfile="gotests.xml" -- -race ./...
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: always()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 30
|
||||
needs: changes
|
||||
if: |
|
||||
github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
&& needs.changes.outputs.docs-only == 'false'
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v1
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v1
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.16.0
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
|
||||
- name: Build Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
go mod download
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
make gen/mark-fresh
|
||||
make -j \
|
||||
build/coder_"$version"_windows_amd64.zip \
|
||||
build/coder_"$version"_linux_amd64.{tar.gz,deb}
|
||||
|
||||
- name: Install Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
regions=(
|
||||
# gcp-region-id instance-name systemd-service-name
|
||||
"us-central1-a coder coder"
|
||||
"australia-southeast1-b coder-sydney coder-workspace-proxy"
|
||||
"europe-west3-c coder-europe coder-workspace-proxy"
|
||||
"southamerica-east1-b coder-brazil coder-workspace-proxy"
|
||||
)
|
||||
|
||||
deb_pkg="./build/coder_$(./scripts/version.sh)_linux_amd64.deb"
|
||||
if [ ! -f "$deb_pkg" ]; then
|
||||
echo "deb package not found: $deb_pkg"
|
||||
ls -l ./build
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gcloud config set project coder-dogfood
|
||||
for region in "${regions[@]}"; do
|
||||
echo "::group::$region"
|
||||
set -- $region
|
||||
|
||||
set -x
|
||||
gcloud config set compute/zone "$1"
|
||||
gcloud compute scp "$deb_pkg" "${2}:/tmp/coder.deb"
|
||||
gcloud compute ssh "$2" -- /bin/sh -c "set -eux; sudo dpkg -i --force-confdef /tmp/coder.deb; sudo systemctl daemon-reload; sudo service '$3' restart"
|
||||
set +x
|
||||
|
||||
echo "::endgroup::"
|
||||
done
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
./build/*.zip
|
||||
./build/*.tar.gz
|
||||
./build/*.deb
|
||||
retention-days: 7
|
||||
|
||||
test-js:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
@@ -401,7 +489,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -412,7 +500,7 @@ jobs:
|
||||
working-directory: site
|
||||
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -425,13 +513,13 @@ jobs:
|
||||
flags: unittest-js
|
||||
|
||||
test-e2e:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-16vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
@@ -444,24 +532,6 @@ jobs:
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: go install tools
|
||||
run: |
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go install github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install go.uber.org/mock/mockgen@v0.4.0
|
||||
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
mkdir -p /tmp/proto
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make -B site/out/index.html
|
||||
@@ -469,27 +539,19 @@ jobs:
|
||||
- run: pnpm playwright:install
|
||||
working-directory: site
|
||||
|
||||
- run: pnpm playwright:test --workers 1
|
||||
- run: pnpm playwright:test
|
||||
env:
|
||||
DEBUG: pw:api
|
||||
working-directory: site
|
||||
|
||||
- name: Upload Playwright Failed Tests
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: failed-test-videos
|
||||
path: ./site/test-results/**/*.webm
|
||||
retention-days: 7
|
||||
|
||||
- name: Upload pprof dumps
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: debug-pprof-dumps
|
||||
path: ./site/test-results/**/debug-pprof-*.txt
|
||||
retention-days: 7
|
||||
|
||||
chromatic:
|
||||
# REMARK: this is only used to build storybook and deploy it to Chromatic.
|
||||
runs-on: ubuntu-latest
|
||||
@@ -497,7 +559,7 @@ jobs:
|
||||
if: needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# Required by Chromatic for build-over-build history, otherwise we
|
||||
# only get 1 commit on shallow checkout.
|
||||
@@ -511,13 +573,12 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v10
|
||||
uses: chromaui/action@v1
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
with:
|
||||
# Do a fast, testing build for change previews
|
||||
buildScriptName: "storybook:ci"
|
||||
buildScriptName: "storybook:build"
|
||||
exitOnceUploaded: true
|
||||
# This will prevent CI from failing when Chromatic detects visual changes
|
||||
exitZeroOnChanges: true
|
||||
@@ -525,14 +586,11 @@ jobs:
|
||||
# https://www.chromatic.com/docs/github-actions#forked-repositories
|
||||
projectToken: 695c25b6cb65
|
||||
workingDir: "./site"
|
||||
storybookBaseDir: "./site"
|
||||
# Prevent excessive build runs on minor version changes
|
||||
skip: "@(renovate/**|dependabot/**)"
|
||||
# Run TurboSnap to trace file dependencies to related stories
|
||||
# and tell chromatic to only take snapshots of relevent stories
|
||||
onlyChanged: true
|
||||
# Avoid uploading single files, because that's very slow
|
||||
zip: true
|
||||
|
||||
# This is a separate step for mainline only that auto accepts and changes
|
||||
# instead of holding CI up. Since we squash/merge, this is defensive to
|
||||
@@ -542,7 +600,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v10
|
||||
uses: chromaui/action@v1
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -550,26 +608,21 @@ jobs:
|
||||
autoAcceptChanges: true
|
||||
# This will prevent CI from failing when Chromatic detects visual changes
|
||||
exitZeroOnChanges: true
|
||||
# Do a full build with documentation for mainline builds
|
||||
buildScriptName: "storybook:build"
|
||||
projectToken: 695c25b6cb65
|
||||
workingDir: "./site"
|
||||
storybookBaseDir: "./site"
|
||||
# Run TurboSnap to trace file dependencies to related stories
|
||||
# and tell chromatic to only take snapshots of relevent stories
|
||||
onlyChanged: true
|
||||
# Avoid uploading single files, because that's very slow
|
||||
zip: true
|
||||
|
||||
offlinedocs:
|
||||
name: offlinedocs
|
||||
needs: changes
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true' || needs.changes.outputs.docs == 'true'
|
||||
|
||||
if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# 0 is required here for version.sh to work.
|
||||
fetch-depth: 0
|
||||
@@ -579,26 +632,12 @@ jobs:
|
||||
with:
|
||||
directory: offlinedocs
|
||||
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
mkdir -p /tmp/proto
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install go tools
|
||||
run: |
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go install github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install go.uber.org/mock/mockgen@v0.4.0
|
||||
go install github.com/golang/mock/mockgen@v1.6.0
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
@@ -629,7 +668,6 @@ jobs:
|
||||
- test-js
|
||||
- test-e2e
|
||||
- offlinedocs
|
||||
- sqlc-vet
|
||||
# Allow this job to run even if the needed jobs fail, are skipped or
|
||||
# cancelled.
|
||||
if: always()
|
||||
@@ -644,8 +682,6 @@ jobs:
|
||||
echo "- test-go-pg: ${{ needs.test-go-pg.result }}"
|
||||
echo "- test-go-race: ${{ needs.test-go-race.result }}"
|
||||
echo "- test-js: ${{ needs.test-js.result }}"
|
||||
echo "- test-e2e: ${{ needs.test-e2e.result }}"
|
||||
echo "- offlinedocs: ${{ needs.offlinedocs.result }}"
|
||||
echo
|
||||
|
||||
# We allow skipped jobs to pass, but not failed or cancelled jobs.
|
||||
@@ -656,231 +692,65 @@ jobs:
|
||||
|
||||
echo "Required checks have passed"
|
||||
|
||||
build:
|
||||
# This builds and publishes ghcr.io/coder/coder-preview:main for each commit
|
||||
# to main branch. We are only building this for amd64 platform. (>95% pulls
|
||||
# are for amd64)
|
||||
build-main-image:
|
||||
# This build and publihes ghcr.io/coder/coder-preview:main for each merge commit to main branch.
|
||||
# We are only building this for amd64 plateform. (>95% pulls are for amd64)
|
||||
needs: changes
|
||||
if: needs.changes.outputs.docs-only == 'false' && !github.event.pull_request.head.repo.fork
|
||||
if: github.ref == 'refs/heads/main' && needs.changes.outputs.docs-only == 'false'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
outputs:
|
||||
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.35.1
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build
|
||||
- name: Build and push Linux amd64 Docker image
|
||||
id: build_and_push
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
tag="main-$(echo "$version" | sed 's/+/-/g')"
|
||||
echo "tag=$tag" >> $GITHUB_OUTPUT
|
||||
|
||||
make gen/mark-fresh
|
||||
make -j \
|
||||
build/coder_linux_{amd64,arm64,armv7} \
|
||||
build/coder_"$version"_windows_amd64.zip \
|
||||
build/coder_"$version"_linux_amd64.{tar.gz,deb}
|
||||
|
||||
- name: Build Linux Docker images
|
||||
id: build-docker
|
||||
env:
|
||||
CODER_IMAGE_BASE: ghcr.io/coder/coder-preview
|
||||
CODER_IMAGE_TAG_PREFIX: main
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
# build Docker images for each architecture
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
version="$(./scripts/version.sh)"
|
||||
tag="main-$(echo "$version" | sed 's/+/-/g')"
|
||||
echo "tag=$tag" >> $GITHUB_OUTPUT
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target ghcr.io/coder/coder-preview:main \
|
||||
--version $version \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
# build images for each architecture
|
||||
make -j build/coder_"$version"_linux_{amd64,arm64,armv7}.tag
|
||||
|
||||
# only push if we are on main branch
|
||||
if [ "${{ github.ref }}" == "refs/heads/main" ]; then
|
||||
# build and push multi-arch manifest, this depends on the other images
|
||||
# being pushed so will automatically push them
|
||||
make -j push/build/coder_"$version"_linux_{amd64,arm64,armv7}.tag
|
||||
|
||||
# Define specific tags
|
||||
tags=("$tag" "main" "latest")
|
||||
|
||||
# Create and push a multi-arch manifest for each tag
|
||||
# we are adding `latest` tag and keeping `main` for backward
|
||||
# compatibality
|
||||
for t in "${tags[@]}"; do
|
||||
./scripts/build_docker_multiarch.sh \
|
||||
--push \
|
||||
--target "ghcr.io/coder/coder-preview:$t" \
|
||||
--version $version \
|
||||
$(cat build/coder_"$version"_linux_{amd64,arm64,armv7}.tag)
|
||||
done
|
||||
fi
|
||||
# Tag image with new package tag and push
|
||||
tag=$(echo "$version" | sed 's/+/-/g')
|
||||
docker tag ghcr.io/coder/coder-preview:main ghcr.io/coder/coder-preview:main-$tag
|
||||
docker push ghcr.io/coder/coder-preview:main-$tag
|
||||
|
||||
- name: Prune old images
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: vlaurin/action-ghcr-prune@v0.6.0
|
||||
uses: vlaurin/action-ghcr-prune@v0.5.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
organization: coder
|
||||
container: coder-preview
|
||||
keep-younger-than: 7 # days
|
||||
keep-tags: latest
|
||||
keep-tags-regexes: ^pr
|
||||
prune-tags-regexes: |
|
||||
^main-
|
||||
^v
|
||||
prune-tags-regexes: ^main-
|
||||
prune-untagged: true
|
||||
|
||||
- name: Upload build artifacts
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
./build/*.zip
|
||||
./build/*.tar.gz
|
||||
./build/*.deb
|
||||
retention-days: 7
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
needs:
|
||||
- changes
|
||||
- build
|
||||
if: |
|
||||
github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
&& needs.changes.outputs.docs-only == 'false'
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
|
||||
- name: Set up Flux CLI
|
||||
uses: fluxcd/flux2/action@main
|
||||
with:
|
||||
# Keep this up to date with the version of flux installed in dogfood cluster
|
||||
version: "2.2.1"
|
||||
|
||||
- name: Get Cluster Credentials
|
||||
uses: "google-github-actions/get-gke-credentials@v2"
|
||||
with:
|
||||
cluster_name: dogfood-v2
|
||||
location: us-central1-a
|
||||
project_id: coder-dogfood-v2
|
||||
|
||||
- name: Reconcile Flux
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
flux --namespace flux-system reconcile source git flux-system
|
||||
flux --namespace flux-system reconcile source git coder-main
|
||||
flux --namespace flux-system reconcile kustomization flux-system
|
||||
flux --namespace flux-system reconcile kustomization coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder-provisioner
|
||||
flux --namespace coder reconcile helmrelease coder
|
||||
flux --namespace coder reconcile helmrelease coder-provisioner
|
||||
|
||||
# Just updating Flux is usually not enough. The Helm release may get
|
||||
# redeployed, but unless something causes the Deployment to update the
|
||||
# pods won't be recreated. It's important that the pods get recreated,
|
||||
# since we use `imagePullPolicy: Always` to ensure we're running the
|
||||
# latest image.
|
||||
- name: Rollout Deployment
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
kubectl --namespace coder rollout restart deployment/coder
|
||||
kubectl --namespace coder rollout status deployment/coder
|
||||
kubectl --namespace coder rollout restart deployment/coder-provisioner
|
||||
kubectl --namespace coder rollout status deployment/coder-provisioner
|
||||
|
||||
deploy-wsproxies:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup flyctl
|
||||
uses: superfly/flyctl-actions/setup-flyctl@master
|
||||
|
||||
- name: Deploy workspace proxies
|
||||
run: |
|
||||
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
IMAGE: ${{ needs.build.outputs.IMAGE }}
|
||||
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
|
||||
|
||||
# sqlc-vet runs a postgres docker container, runs Coder migrations, and then
|
||||
# runs sqlc-vet to ensure all queries are valid. This catches any mistakes
|
||||
# in migrations or sqlc queries that makes a query unable to be prepared.
|
||||
sqlc-vet:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
# We need golang to run the migration main.go
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Setup and run sqlc vet
|
||||
run: |
|
||||
make sqlc-vet
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: auto-approve dependabot
|
||||
uses: hmarr/auto-approve-action@v4
|
||||
uses: hmarr/auto-approve-action@v3
|
||||
if: github.actor == 'dependabot[bot]'
|
||||
|
||||
cla:
|
||||
@@ -34,7 +34,7 @@ jobs:
|
||||
steps:
|
||||
- name: cla
|
||||
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
|
||||
uses: contributor-assistant/github-action@v2.3.1
|
||||
uses: contributor-assistant/github-action@v2.3.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# the below token should have repo scope and must be manually added by you in the repository's secret
|
||||
@@ -46,8 +46,7 @@ jobs:
|
||||
path-to-document: "https://github.com/coder/cla/blob/main/README.md"
|
||||
# branch should not be protected
|
||||
branch: "main"
|
||||
# Some users have signed a corporate CLA with Coder so are exempt from signing our community one.
|
||||
allowlist: "coryb,aaronlehmann,dependabot*"
|
||||
allowlist: dependabot*
|
||||
|
||||
release-labels:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -55,7 +54,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'pull_request_target' && success() && !github.event.pull_request.draft }}
|
||||
steps:
|
||||
- name: release-labels
|
||||
uses: actions/github-script@v7
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
# This script ensures PR title and labels are in sync:
|
||||
#
|
||||
|
||||
@@ -32,10 +32,10 @@ jobs:
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -7,26 +7,20 @@ on:
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
- ".github/workflows/dogfood.yaml"
|
||||
- "flake.lock"
|
||||
- "flake.nix"
|
||||
pull_request:
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
- ".github/workflows/dogfood.yaml"
|
||||
- "flake.lock"
|
||||
- "flake.nix"
|
||||
# Uncomment these lines when testing with CI.
|
||||
# pull_request:
|
||||
# paths:
|
||||
# - "dogfood/**"
|
||||
# - ".github/workflows/dogfood.yaml"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build_image:
|
||||
runs-on: ubuntu-latest
|
||||
deploy_image:
|
||||
runs-on: buildjet-4vcpu-ubuntu-2204
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get branch name
|
||||
id: branch-name
|
||||
uses: tj-actions/branch-names@v8
|
||||
uses: tj-actions/branch-names@v6.5
|
||||
|
||||
- name: "Branch name to Docker tag name"
|
||||
id: docker-tag-name
|
||||
@@ -36,80 +30,51 @@ jobs:
|
||||
tag=${tag//\//--}
|
||||
echo "tag=${tag}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Depot CLI
|
||||
uses: depot/setup-action@v1
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and push Non-Nix image
|
||||
uses: depot/build-push-action@v1
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
project: b4q6ltmpzh
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
buildx-fallback: true
|
||||
context: "{{defaultContext}}:dogfood"
|
||||
pull: true
|
||||
save: true
|
||||
push: ${{ github.ref == 'refs/heads/main' }}
|
||||
push: true
|
||||
tags: "codercom/oss-dogfood:${{ steps.docker-tag-name.outputs.tag }},codercom/oss-dogfood:latest"
|
||||
|
||||
- name: Build and push Nix image
|
||||
uses: depot/build-push-action@v1
|
||||
with:
|
||||
project: b4q6ltmpzh
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
buildx-fallback: true
|
||||
context: "."
|
||||
file: "dogfood/Dockerfile.nix"
|
||||
pull: true
|
||||
save: true
|
||||
push: ${{ github.ref == 'refs/heads/main' }}
|
||||
tags: "codercom/oss-dogfood-nix:${{ steps.docker-tag-name.outputs.tag }},codercom/oss-dogfood-nix:latest"
|
||||
cache-from: type=registry,ref=codercom/oss-dogfood:latest
|
||||
cache-to: type=inline
|
||||
|
||||
deploy_template:
|
||||
needs: build_image
|
||||
needs: deploy_image
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Terraform init and validate
|
||||
run: |
|
||||
cd dogfood
|
||||
terraform init -upgrade
|
||||
terraform validate
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get short commit SHA
|
||||
if: github.ref == 'refs/heads/main'
|
||||
id: vars
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Get latest commit title
|
||||
if: github.ref == 'refs/heads/main'
|
||||
id: message
|
||||
run: echo "pr_title=$(git log --format=%s -n 1 ${{ github.sha }})" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Get latest Coder binary from the server"
|
||||
if: github.ref == 'refs/heads/main'
|
||||
run: |
|
||||
curl -fsSL "https://dev.coder.com/bin/coder-linux-amd64" -o "./coder"
|
||||
chmod +x "./coder"
|
||||
|
||||
- name: "Push template"
|
||||
if: github.ref == 'refs/heads/main'
|
||||
run: |
|
||||
./coder templates push $CODER_TEMPLATE_NAME --directory $CODER_TEMPLATE_DIR --yes --name=$CODER_TEMPLATE_VERSION --message="$CODER_TEMPLATE_MESSAGE" --variable jfrog_url=${{ secrets.JFROG_URL }}
|
||||
./coder templates push $CODER_TEMPLATE_NAME --directory $CODER_TEMPLATE_DIR --yes --name=$CODER_TEMPLATE_VERSION --message="$CODER_TEMPLATE_MESSAGE"
|
||||
env:
|
||||
# Consumed by Coder CLI
|
||||
CODER_URL: https://dev.coder.com
|
||||
|
||||
@@ -17,7 +17,7 @@ jobs:
|
||||
timeout-minutes: 240
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
@@ -14,4 +14,4 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Assign author
|
||||
uses: toshimaru/auto-author-assign@v2.1.0
|
||||
uses: toshimaru/auto-author-assign@v1.6.2
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: pr-cleanup
|
||||
name: Cleanup PR deployment and image
|
||||
on:
|
||||
pull_request:
|
||||
types: closed
|
||||
@@ -35,14 +35,14 @@ jobs:
|
||||
|
||||
- name: Set up kubeconfig
|
||||
run: |
|
||||
set -euo pipefail
|
||||
set -euxo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Delete helm release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
set -euxo pipefail
|
||||
helm delete --namespace "pr${{ steps.pr_number.outputs.PR_NUMBER }}" "pr${{ steps.pr_number.outputs.PR_NUMBER }}" || echo "helm release not found"
|
||||
|
||||
- name: "Remove PR namespace"
|
||||
@@ -51,7 +51,7 @@ jobs:
|
||||
|
||||
- name: "Remove DNS records"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
set -euxo pipefail
|
||||
# Get identifier for the record
|
||||
record_id=$(curl -X GET "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records?name=%2A.pr${{ steps.pr_number.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
|
||||
@@ -9,6 +9,10 @@ on:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number"
|
||||
type: number
|
||||
required: true
|
||||
experiments:
|
||||
description: "Experiments to enable"
|
||||
required: false
|
||||
@@ -33,6 +37,10 @@ permissions:
|
||||
packages: write
|
||||
pull-requests: write # needed for commenting on PRs
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check_pr:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -40,7 +48,7 @@ jobs:
|
||||
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Check if PR is open
|
||||
id: check_pr
|
||||
@@ -65,12 +73,12 @@ jobs:
|
||||
CODER_BASE_IMAGE_TAG: ${{ steps.set_tags.outputs.CODER_BASE_IMAGE_TAG }}
|
||||
CODER_IMAGE_TAG: ${{ steps.set_tags.outputs.CODER_IMAGE_TAG }}
|
||||
NEW: ${{ steps.check_deployment.outputs.NEW }}
|
||||
BUILD: ${{ steps.build_conditionals.outputs.first_or_force_build == 'true' || steps.build_conditionals.outputs.automatic_rebuild == 'true' }}
|
||||
BUILD: ${{ steps.build_conditionals.outputs.first_or_force_build || steps.build_conditionals.outputs.automatic_rebuild }}
|
||||
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -102,7 +110,6 @@ jobs:
|
||||
set -euo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
chmod 644 ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Check if the helm deployment already exists
|
||||
@@ -119,7 +126,7 @@ jobs:
|
||||
echo "NEW=$NEW" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check changed files
|
||||
uses: dorny/paths-filter@v3
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
base: ${{ github.ref }}
|
||||
@@ -158,12 +165,12 @@ jobs:
|
||||
echo "automatic_rebuild=${{ steps.check_deployment.outputs.NEW == 'false' && steps.filter.outputs.all_count > steps.filter.outputs.ignored_count }}" >> $GITHUB_OUTPUT
|
||||
|
||||
comment-pr:
|
||||
needs: get_info
|
||||
needs: [check_pr, get_info]
|
||||
if: needs.get_info.outputs.BUILD == 'true' || github.event.inputs.deploy == 'true'
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@v3
|
||||
uses: peter-evans/find-comment@v2
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -173,7 +180,7 @@ jobs:
|
||||
|
||||
- name: Comment on PR
|
||||
id: comment_id
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
with:
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -190,16 +197,12 @@ jobs:
|
||||
# Run build job only if there are changes in the files that we care about or if the workflow is manually triggered with --build flag
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
# This concurrency only cancels build jobs if a new build is triggred. It will avoid cancelling the current deployemtn in case of docs chnages.
|
||||
concurrency:
|
||||
group: build-${{ github.workflow }}-${{ github.ref }}-${{ needs.get_info.outputs.BUILD }}
|
||||
cancel-in-progress: true
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -213,7 +216,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -254,7 +257,6 @@ jobs:
|
||||
set -euo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
chmod 644 ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Check if image exists
|
||||
@@ -294,7 +296,7 @@ jobs:
|
||||
kubectl create namespace "pr${{ env.PR_NUMBER }}"
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Check and Create Certificate
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
@@ -351,7 +353,6 @@ jobs:
|
||||
- name: Install/Upgrade Helm chart
|
||||
run: |
|
||||
set -euo pipefail
|
||||
helm dependency update --skip-refresh ./helm/coder
|
||||
helm upgrade --install "pr${{ env.PR_NUMBER }}" ./helm/coder \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--values ./pr-deploy-values.yaml \
|
||||
@@ -416,7 +417,8 @@ jobs:
|
||||
|
||||
# Create template
|
||||
cd ./.github/pr-deployments/template
|
||||
coder templates push -y --variable namespace=pr${{ env.PR_NUMBER }} kubernetes
|
||||
terraform init
|
||||
coder templates create -y --variable namespace=pr${{ env.PR_NUMBER }} kubernetes
|
||||
|
||||
# Create workspace
|
||||
coder create --template="kubernetes" kube --parameter cpu=2 --parameter memory=4 --parameter home_disk_size=2 -y
|
||||
@@ -441,7 +443,7 @@ jobs:
|
||||
echo "Slack notification sent"
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@v3
|
||||
uses: peter-evans/find-comment@v2
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ env.PR_NUMBER }}
|
||||
@@ -450,7 +452,7 @@ jobs:
|
||||
direction: last
|
||||
|
||||
- name: Comment on PR
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
env:
|
||||
STATUS: ${{ needs.get_info.outputs.NEW == 'true' && 'Created' || 'Updated' }}
|
||||
with:
|
||||
|
||||
+72
-116
@@ -40,7 +40,7 @@ jobs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -85,7 +85,7 @@ jobs:
|
||||
cat "$CODER_RELEASE_NOTES_FILE"
|
||||
|
||||
- name: Docker Login
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -103,7 +103,7 @@ jobs:
|
||||
- name: Install nfpm
|
||||
run: |
|
||||
set -euo pipefail
|
||||
wget -O /tmp/nfpm.deb https://github.com/goreleaser/nfpm/releases/download/v2.35.1/nfpm_2.35.1_amd64.deb
|
||||
wget -O /tmp/nfpm.deb https://github.com/goreleaser/nfpm/releases/download/v2.18.1/nfpm_amd64.deb
|
||||
sudo dpkg -i /tmp/nfpm.deb
|
||||
rm /tmp/nfpm.deb
|
||||
|
||||
@@ -281,13 +281,13 @@ jobs:
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v2
|
||||
uses: google-github-actions/auth@v1
|
||||
with:
|
||||
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
uses: "google-github-actions/setup-gcloud@v2"
|
||||
uses: "google-github-actions/setup-gcloud@v1"
|
||||
|
||||
- name: Publish Helm Chart
|
||||
if: ${{ !inputs.dry_run }}
|
||||
@@ -306,7 +306,7 @@ jobs:
|
||||
|
||||
- name: Upload artifacts to actions (if dry-run)
|
||||
if: ${{ inputs.dry_run }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: release-artifacts
|
||||
path: |
|
||||
@@ -321,100 +321,21 @@ jobs:
|
||||
|
||||
- name: Start Packer builds
|
||||
if: ${{ !inputs.dry_run }}
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
repository: coder/packages
|
||||
event-type: coder-release
|
||||
client-payload: '{"coder_version": "${{ steps.version.outputs.version }}"}'
|
||||
|
||||
publish-homebrew:
|
||||
name: Publish to Homebrew tap
|
||||
runs-on: ubuntu-latest
|
||||
needs: release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
|
||||
steps:
|
||||
# TODO: skip this if it's not a new release (i.e. a backport). This is
|
||||
# fine right now because it just makes a PR that we can close.
|
||||
- name: Update homebrew
|
||||
env:
|
||||
# Variables used by the `gh` command
|
||||
GH_REPO: coder/homebrew-coder
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Keep version number around for reference, removing any potential leading v
|
||||
coder_version="$(echo "${{ needs.release.outputs.version }}" | tr -d v)"
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
# Setup Git
|
||||
git config --global user.email "ci@coder.com"
|
||||
git config --global user.name "Coder CI"
|
||||
git config --global credential.helper "store"
|
||||
|
||||
temp_dir="$(mktemp -d)"
|
||||
cd "$temp_dir"
|
||||
|
||||
# Download checksums
|
||||
checksums_url="$(gh release view --repo coder/coder "v$coder_version" --json assets \
|
||||
| jq -r ".assets | map(.url) | .[]" \
|
||||
| grep -e ".checksums.txt\$")"
|
||||
wget "$checksums_url" -O checksums.txt
|
||||
|
||||
# Get the SHAs
|
||||
darwin_arm_sha="$(cat checksums.txt | grep "darwin_arm64.zip" | awk '{ print $1 }')"
|
||||
darwin_intel_sha="$(cat checksums.txt | grep "darwin_amd64.zip" | awk '{ print $1 }')"
|
||||
linux_sha="$(cat checksums.txt | grep "linux_amd64.tar.gz" | awk '{ print $1 }')"
|
||||
|
||||
echo "macOS arm64: $darwin_arm_sha"
|
||||
echo "macOS amd64: $darwin_intel_sha"
|
||||
echo "Linux amd64: $linux_sha"
|
||||
|
||||
# Check out the homebrew repo
|
||||
git clone "https://github.com/$GH_REPO" homebrew-coder
|
||||
brew_branch="auto-release/$coder_version"
|
||||
cd homebrew-coder
|
||||
|
||||
# Check if a PR already exists.
|
||||
pr_count="$(gh pr list --search "head:$brew_branch" --json id,closed | jq -r ".[] | select(.closed == false) | .id" | wc -l)"
|
||||
if [[ "$pr_count" > 0 ]]; then
|
||||
echo "Bailing out as PR already exists" 2>&1
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Set up cdrci credentials for pushing to homebrew-coder
|
||||
echo "https://x-access-token:$GH_TOKEN@github.com" >> ~/.git-credentials
|
||||
# Update the formulae and push
|
||||
git checkout -b "$brew_branch"
|
||||
./scripts/update-v2.sh "$coder_version" "$darwin_arm_sha" "$darwin_intel_sha" "$linux_sha"
|
||||
git add .
|
||||
git commit -m "coder $coder_version"
|
||||
git push -u origin -f "$brew_branch"
|
||||
|
||||
# Create PR
|
||||
gh pr create \
|
||||
-B master -H "$brew_branch" \
|
||||
-t "coder $coder_version" \
|
||||
-b "" \
|
||||
-r "${{ github.actor }}" \
|
||||
-a "${{ github.actor }}" \
|
||||
-b "This automatic PR was triggered by the release of Coder v$coder_version"
|
||||
|
||||
publish-winget:
|
||||
name: Publish to winget-pkgs
|
||||
runs-on: windows-latest
|
||||
needs: release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
|
||||
steps:
|
||||
- name: Sync fork
|
||||
run: gh repo sync cdrci/winget-pkgs -b master
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.WINGET_GH_TOKEN }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -439,26 +360,27 @@ jobs:
|
||||
|
||||
$release_assets = gh release view --repo coder/coder "v${version}" --json assets | `
|
||||
ConvertFrom-Json
|
||||
# Get the installer URLs from the release assets.
|
||||
$amd64_installer_url = $release_assets.assets | `
|
||||
# Get the installer URL from the release assets.
|
||||
$installer_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64_installer.exe$" | `
|
||||
Select -ExpandProperty url
|
||||
$amd64_zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
$arm64_zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_arm64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
|
||||
echo "amd64 Installer URL: ${amd64_installer_url}"
|
||||
echo "amd64 zip URL: ${amd64_zip_url}"
|
||||
echo "arm64 zip URL: ${arm64_zip_url}"
|
||||
echo "Installer URL: ${installer_url}"
|
||||
echo "Package version: ${version}"
|
||||
|
||||
# The URL "|X64" suffix forces the architecture as it cannot be
|
||||
# sniffed properly from the URL. wingetcreate checks both the URL and
|
||||
# binary magic bytes for the architecture and they need to both match,
|
||||
# but they only check for `x64`, `win64` and `_64` in the URL. Our URL
|
||||
# contains `amd64` which doesn't match sadly.
|
||||
#
|
||||
# wingetcreate will still do the binary magic bytes check, so if we
|
||||
# accidentally change the architecture of the installer, it will fail
|
||||
# submission.
|
||||
.\wingetcreate.exe update Coder.Coder `
|
||||
--submit `
|
||||
--version "${version}" `
|
||||
--urls "${amd64_installer_url}" "${amd64_zip_url}" "${arm64_zip_url}" `
|
||||
--urls "${installer_url}|X64" `
|
||||
--token "$env:WINGET_GH_TOKEN"
|
||||
|
||||
env:
|
||||
@@ -485,28 +407,62 @@ jobs:
|
||||
# different repo.
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
|
||||
# publish-sqlc pushes the latest schema to sqlc cloud.
|
||||
# At present these pushes cannot be tagged, so the last push is always the latest.
|
||||
publish-sqlc:
|
||||
name: "Publish to schema sqlc cloud"
|
||||
runs-on: "ubuntu-latest"
|
||||
publish-chocolatey:
|
||||
name: Publish to Chocolatey
|
||||
runs-on: windows-latest
|
||||
needs: release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0
|
||||
|
||||
# We need golang to run the migration main.go
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
# Same reason as for release.
|
||||
- name: Fetch git tags
|
||||
run: git fetch --tags --force
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Push schema to sqlc cloud
|
||||
# Don't block a release on this
|
||||
continue-on-error: true
|
||||
# From https://chocolatey.org
|
||||
- name: Install Chocolatey
|
||||
run: |
|
||||
make sqlc-push
|
||||
Set-ExecutionPolicy Bypass -Scope Process -Force
|
||||
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072
|
||||
|
||||
iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
|
||||
|
||||
- name: Build chocolatey package
|
||||
run: |
|
||||
cd scripts/chocolatey
|
||||
|
||||
# The package version is the same as the tag minus the leading "v".
|
||||
# The version in this output already has the leading "v" removed but
|
||||
# we do it again to be safe.
|
||||
$version = "${{ needs.release.outputs.version }}".Trim('v')
|
||||
|
||||
$release_assets = gh release view --repo coder/coder "v${version}" --json assets | `
|
||||
ConvertFrom-Json
|
||||
|
||||
# Get the URL for the Windows ZIP from the release assets.
|
||||
$zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
|
||||
echo "ZIP URL: ${zip_url}"
|
||||
echo "Package version: ${version}"
|
||||
|
||||
echo "Downloading ZIP..."
|
||||
Invoke-WebRequest $zip_url -OutFile assets.zip
|
||||
|
||||
echo "Extracting ZIP..."
|
||||
Expand-Archive assets.zip -DestinationPath assets/
|
||||
|
||||
# No need to specify nuspec if there's only one in the directory.
|
||||
choco pack --version=$version binary_path=assets/coder.exe
|
||||
|
||||
choco apikey --api-key $env:CHOCO_API_KEY --source https://push.chocolatey.org/
|
||||
|
||||
# No need to specify nupkg if there's only one in the directory.
|
||||
choco push --source https://push.chocolatey.org/
|
||||
|
||||
env:
|
||||
CHOCO_API_KEY: ${{ secrets.CHOCO_API_KEY }}
|
||||
|
||||
@@ -26,10 +26,10 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v3
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
- name: Install yq
|
||||
run: go run github.com/mikefarah/yq/v4@v4.30.6
|
||||
- name: Install mockgen
|
||||
run: go install go.uber.org/mock/mockgen@v0.4.0
|
||||
run: go install github.com/golang/mock/mockgen@v1.6.0
|
||||
- name: Install protoc-gen-go
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
- name: Install protoc-gen-go-drpc
|
||||
@@ -122,7 +122,7 @@ jobs:
|
||||
image_name: ${{ steps.build.outputs.image }}
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca
|
||||
uses: aquasecurity/trivy-action@41f05d9ecffa2ed3f1580af306000f734b733e54
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
@@ -130,13 +130,13 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
uses: github/codeql-action/upload-sarif@v2
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
- name: Upload Trivy scan results as an artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: trivy
|
||||
path: trivy-results.sarif
|
||||
|
||||
@@ -13,7 +13,7 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: stale
|
||||
uses: actions/stale@v9.0.0
|
||||
uses: actions/stale@v8.0.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
@@ -30,57 +30,11 @@ jobs:
|
||||
operations-per-run: 60
|
||||
# Start with the oldest issues, always.
|
||||
ascending: true
|
||||
- name: "Close old issues labeled likely-no"
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const thirtyDaysAgo = new Date(new Date().setDate(new Date().getDate() - 30));
|
||||
console.log(`Looking for issues labeled with 'likely-no' more than 30 days ago, which is after ${thirtyDaysAgo.toISOString()}`);
|
||||
|
||||
const issues = await github.rest.issues.listForRepo({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
labels: 'likely-no',
|
||||
state: 'open',
|
||||
});
|
||||
|
||||
console.log(`Found ${issues.data.length} open issues labeled with 'likely-no'`);
|
||||
|
||||
for (const issue of issues.data) {
|
||||
console.log(`Checking issue #${issue.number} created at ${issue.created_at}`);
|
||||
|
||||
const timeline = await github.rest.issues.listEventsForTimeline({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issue.number,
|
||||
});
|
||||
|
||||
const labelEvent = timeline.data.find(event => event.event === 'labeled' && event.label.name === 'likely-no');
|
||||
|
||||
if (labelEvent) {
|
||||
console.log(`Issue #${issue.number} was labeled with 'likely-no' at ${labelEvent.created_at}`);
|
||||
|
||||
if (new Date(labelEvent.created_at) < thirtyDaysAgo) {
|
||||
console.log(`Issue #${issue.number} is older than 30 days with 'likely-no' label, closing issue.`);
|
||||
await github.rest.issues.update({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issue.number,
|
||||
state: 'closed',
|
||||
state_reason: 'not_planned'
|
||||
});
|
||||
}
|
||||
} else {
|
||||
console.log(`Issue #${issue.number} does not have a 'likely-no' label event in its timeline.`);
|
||||
}
|
||||
}
|
||||
|
||||
branches:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
- name: Run delete-old-branches-action
|
||||
uses: beatlabs/delete-old-branches-action@v0.0.10
|
||||
with:
|
||||
@@ -98,8 +52,8 @@ jobs:
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 30
|
||||
keep_minimum_runs: 30
|
||||
retain_days: 1
|
||||
keep_minimum_runs: 1
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
@@ -107,6 +61,7 @@ jobs:
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 30
|
||||
keep_minimum_runs: 30
|
||||
retain_days: 0
|
||||
keep_minimum_runs: 0
|
||||
delete_run_by_conclusion_pattern: skipped
|
||||
delete_workflow_pattern: pr-deploy.yaml
|
||||
|
||||
@@ -14,7 +14,6 @@ darcula = "darcula"
|
||||
Hashi = "Hashi"
|
||||
trialer = "trialer"
|
||||
encrypter = "encrypter"
|
||||
hel = "hel" # as in helsinki
|
||||
|
||||
[files]
|
||||
extend-exclude = [
|
||||
@@ -30,6 +29,4 @@ extend-exclude = [
|
||||
"**/*_test.go",
|
||||
"**/*.test.tsx",
|
||||
"**/pnpm-lock.yaml",
|
||||
"tailnet/testdata/**",
|
||||
"site/src/pages/SetupPage/countries.tsx",
|
||||
]
|
||||
|
||||
+5
-12
@@ -20,6 +20,7 @@ yarn-error.log
|
||||
|
||||
# Front-end ignore patterns.
|
||||
.next/
|
||||
site/**/*.typegen.ts
|
||||
site/build-storybook.log
|
||||
site/coverage/
|
||||
site/storybook-static/
|
||||
@@ -29,14 +30,15 @@ site/e2e/states/*.json
|
||||
site/e2e/.auth.json
|
||||
site/playwright-report/*
|
||||
site/.swc
|
||||
site/dist/
|
||||
|
||||
# Make target for updating golden files (any dir).
|
||||
.gen-golden
|
||||
|
||||
# Build
|
||||
build/
|
||||
dist/
|
||||
out/
|
||||
/build/
|
||||
/dist/
|
||||
site/out/
|
||||
|
||||
# Bundle analysis
|
||||
site/stats/
|
||||
@@ -59,12 +61,3 @@ site/stats/
|
||||
./scaletest/terraform/.terraform.lock.hcl
|
||||
scaletest/terraform/secrets.tfvars
|
||||
.terraform.tfstate.*
|
||||
|
||||
# Nix
|
||||
result
|
||||
|
||||
# Data dumps from unit tests
|
||||
**/*.test.sql
|
||||
|
||||
# Filebrowser.db
|
||||
**/filebrowser.db
|
||||
|
||||
+7
-17
@@ -2,19 +2,12 @@
|
||||
# Over time we should try tightening some of these.
|
||||
|
||||
linters-settings:
|
||||
dupl:
|
||||
# goal: 100
|
||||
threshold: 412
|
||||
|
||||
exhaustruct:
|
||||
include:
|
||||
# Gradually extend to cover more of the codebase.
|
||||
- 'httpmw\.\w+'
|
||||
# We want to enforce all values are specified when inserting or updating
|
||||
# a database row. Ref: #9936
|
||||
- 'github.com/coder/coder/v2/coderd/database\.[^G][^e][^t]\w+Params'
|
||||
gocognit:
|
||||
min-complexity: 300
|
||||
min-complexity: 46 # Min code complexity (def 30).
|
||||
|
||||
goconst:
|
||||
min-len: 4 # Min length of string consts (def 3).
|
||||
@@ -125,6 +118,10 @@ linters-settings:
|
||||
goimports:
|
||||
local-prefixes: coder.com,cdr.dev,go.coder.com,github.com/cdr,github.com/coder
|
||||
|
||||
gocyclo:
|
||||
# goal: 30
|
||||
min-complexity: 47
|
||||
|
||||
importas:
|
||||
no-unaliased: true
|
||||
|
||||
@@ -134,8 +131,7 @@ linters-settings:
|
||||
- trialer
|
||||
|
||||
nestif:
|
||||
# goal: 10
|
||||
min-complexity: 20
|
||||
min-complexity: 4 # Min complexity of if statements (def 5, goal 4)
|
||||
|
||||
revive:
|
||||
# see https://github.com/mgechev/revive#available-rules for details.
|
||||
@@ -236,12 +232,7 @@ linters:
|
||||
- exportloopref
|
||||
- forcetypeassert
|
||||
- gocritic
|
||||
# gocyclo is may be useful in the future when we start caring
|
||||
# about testing complexity, but for the time being we should
|
||||
# create a good culture around cognitive complexity.
|
||||
# - gocyclo
|
||||
- gocognit
|
||||
- nestif
|
||||
- gocyclo
|
||||
- goimports
|
||||
- gomodguard
|
||||
- gosec
|
||||
@@ -277,4 +268,3 @@ linters:
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- dupl
|
||||
|
||||
+5
-17
@@ -23,6 +23,7 @@ yarn-error.log
|
||||
|
||||
# Front-end ignore patterns.
|
||||
.next/
|
||||
site/**/*.typegen.ts
|
||||
site/build-storybook.log
|
||||
site/coverage/
|
||||
site/storybook-static/
|
||||
@@ -32,14 +33,15 @@ site/e2e/states/*.json
|
||||
site/e2e/.auth.json
|
||||
site/playwright-report/*
|
||||
site/.swc
|
||||
site/dist/
|
||||
|
||||
# Make target for updating golden files (any dir).
|
||||
.gen-golden
|
||||
|
||||
# Build
|
||||
build/
|
||||
dist/
|
||||
out/
|
||||
/build/
|
||||
/dist/
|
||||
site/out/
|
||||
|
||||
# Bundle analysis
|
||||
site/stats/
|
||||
@@ -62,15 +64,6 @@ site/stats/
|
||||
./scaletest/terraform/.terraform.lock.hcl
|
||||
scaletest/terraform/secrets.tfvars
|
||||
.terraform.tfstate.*
|
||||
|
||||
# Nix
|
||||
result
|
||||
|
||||
# Data dumps from unit tests
|
||||
**/*.test.sql
|
||||
|
||||
# Filebrowser.db
|
||||
**/filebrowser.db
|
||||
# .prettierignore.include:
|
||||
# Helm templates contain variables that are invalid YAML and can't be formatted
|
||||
# by Prettier.
|
||||
@@ -82,13 +75,8 @@ helm/**/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
# Ignore generated JSON (e.g. examples/examples.gen.json).
|
||||
**/*.gen.json
|
||||
|
||||
@@ -8,13 +8,8 @@ helm/**/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
# Ignore generated JSON (e.g. examples/examples.gen.json).
|
||||
**/*.gen.json
|
||||
|
||||
+7
-7
@@ -1,18 +1,18 @@
|
||||
# This config file is used in conjunction with `.editorconfig` to specify
|
||||
# formatting for prettier-supported files. See `.editorconfig` and
|
||||
# `site/.editorconfig` for whitespace formatting options.
|
||||
# `site/.editorconfig`for whitespace formatting options.
|
||||
printWidth: 80
|
||||
proseWrap: always
|
||||
semi: false
|
||||
trailingComma: all
|
||||
useTabs: false
|
||||
tabWidth: 2
|
||||
overrides:
|
||||
- files:
|
||||
- README.md
|
||||
- docs/api/**/*.md
|
||||
- docs/cli/**/*.md
|
||||
- docs/changelogs/*.md
|
||||
- .github/**/*.{yaml,yml,toml}
|
||||
- scripts/**/*.{yaml,yml,toml}
|
||||
options:
|
||||
proseWrap: preserve
|
||||
- files:
|
||||
- "site/**/*.yaml"
|
||||
- "site/**/*.yml"
|
||||
options:
|
||||
proseWrap: always
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
// Replace all NullTime with string
|
||||
replace github.com/coder/coder/v2/codersdk.NullTime string
|
||||
replace github.com/coder/coder/codersdk.NullTime string
|
||||
// Prevent swaggo from rendering enums for time.Duration
|
||||
replace time.Duration int64
|
||||
// Do not expose "echo" provider
|
||||
replace github.com/coder/coder/v2/codersdk.ProvisionerType string
|
||||
replace github.com/coder/coder/codersdk.ProvisionerType string
|
||||
// Do not render netip.Addr
|
||||
replace netip.Addr string
|
||||
|
||||
Vendored
+10
-20
@@ -18,11 +18,10 @@
|
||||
"coderdenttest",
|
||||
"coderdtest",
|
||||
"codersdk",
|
||||
"contravariance",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"dbfake",
|
||||
"dbgen",
|
||||
"dbmem",
|
||||
"dbtype",
|
||||
"DERP",
|
||||
"derphttp",
|
||||
@@ -40,7 +39,6 @@
|
||||
"enterprisemeta",
|
||||
"errgroup",
|
||||
"eventsourcemock",
|
||||
"externalauth",
|
||||
"Failf",
|
||||
"fatih",
|
||||
"Formik",
|
||||
@@ -60,7 +58,6 @@
|
||||
"idtoken",
|
||||
"Iflag",
|
||||
"incpatch",
|
||||
"initialisms",
|
||||
"ipnstate",
|
||||
"isatty",
|
||||
"Jobf",
|
||||
@@ -119,13 +116,13 @@
|
||||
"stretchr",
|
||||
"STTY",
|
||||
"stuntest",
|
||||
"tanstack",
|
||||
"tailbroker",
|
||||
"tailcfg",
|
||||
"tailexchange",
|
||||
"tailnet",
|
||||
"tailnettest",
|
||||
"Tailscale",
|
||||
"tanstack",
|
||||
"tbody",
|
||||
"TCGETS",
|
||||
"tcpip",
|
||||
@@ -142,7 +139,6 @@
|
||||
"tios",
|
||||
"tmpdir",
|
||||
"tokenconfig",
|
||||
"Topbar",
|
||||
"tparallel",
|
||||
"trialer",
|
||||
"trimprefix",
|
||||
@@ -170,10 +166,10 @@
|
||||
"workspaceapps",
|
||||
"workspacebuilds",
|
||||
"workspacename",
|
||||
"wsconncache",
|
||||
"wsjson",
|
||||
"xerrors",
|
||||
"xlarge",
|
||||
"xsmall",
|
||||
"xstate",
|
||||
"yamux"
|
||||
],
|
||||
"cSpell.ignorePaths": ["site/package.json", ".vscode/settings.json"],
|
||||
@@ -190,25 +186,19 @@
|
||||
]
|
||||
},
|
||||
"eslint.workingDirectories": ["./site"],
|
||||
"files.exclude": {
|
||||
"**/node_modules": true
|
||||
},
|
||||
"search.exclude": {
|
||||
"**.pb.go": true,
|
||||
"**/*.gen.json": true,
|
||||
"**/testdata/*": true,
|
||||
"**Generated.ts": true,
|
||||
"coderd/apidoc/**": true,
|
||||
"docs/api/*.md": true,
|
||||
"docs/templates/*.md": true,
|
||||
"LICENSE": true,
|
||||
"scripts/metricsdocgen/metrics": true,
|
||||
"site/out/**": true,
|
||||
"site/storybook-static/**": true,
|
||||
"**.map": true,
|
||||
"pnpm-lock.yaml": true
|
||||
"docs/api/*.md": true
|
||||
},
|
||||
// Ensure files always have a newline.
|
||||
"files.insertFinalNewline": true,
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintFlags": ["--fast"],
|
||||
"go.lintOnSave": "package",
|
||||
"go.coverOnSave": true,
|
||||
"go.coverageDecorator": {
|
||||
"type": "gutter",
|
||||
"coveredGutterStyle": "blockgreen",
|
||||
|
||||
@@ -50,7 +50,7 @@ endif
|
||||
# Note, all find statements should be written with `.` or `./path` as
|
||||
# the search path so that these exclusions match.
|
||||
FIND_EXCLUSIONS= \
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path '*/out/*' -o -path './coderd/apidoc/*' -o -path '*/.next/*' -o -path '*/.terraform/*' \) -prune \)
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path '*/out/*' -o -path './coderd/apidoc/*' -o -path '*/.next/*' \) -prune \)
|
||||
# Source files used for make targets, evaluated on use.
|
||||
GO_SRC_FILES := $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.go' -not -name '*_test.go')
|
||||
# All the shell files in the repo, excluding ignored files.
|
||||
@@ -107,9 +107,9 @@ endif
|
||||
|
||||
|
||||
clean:
|
||||
rm -rf build/ site/build/ site/out/
|
||||
mkdir -p build/ site/out/bin/
|
||||
git restore site/out/
|
||||
rm -rf build site/out
|
||||
mkdir -p build site/out/bin
|
||||
git restore site/out
|
||||
.PHONY: clean
|
||||
|
||||
build-slim: $(CODER_SLIM_BINARIES)
|
||||
@@ -419,6 +419,7 @@ lint: lint/shellcheck lint/go lint/ts lint/helm lint/site-icons
|
||||
|
||||
lint/site-icons:
|
||||
./scripts/check_site_icons.sh
|
||||
|
||||
.PHONY: lint/site-icons
|
||||
|
||||
lint/ts:
|
||||
@@ -428,8 +429,7 @@ lint/ts:
|
||||
|
||||
lint/go:
|
||||
./scripts/check_enterprise_imports.sh
|
||||
linter_ver=$(shell egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/Dockerfile | cut -d '=' -f 2)
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$$linter_ver
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
|
||||
golangci-lint run
|
||||
.PHONY: lint/go
|
||||
|
||||
@@ -449,19 +449,17 @@ lint/helm:
|
||||
DB_GEN_FILES := \
|
||||
coderd/database/querier.go \
|
||||
coderd/database/unique_constraint.go \
|
||||
coderd/database/dbmem/dbmem.go \
|
||||
coderd/database/dbfake/dbfake.go \
|
||||
coderd/database/dbmetrics/dbmetrics.go \
|
||||
coderd/database/dbauthz/dbauthz.go \
|
||||
coderd/database/dbmock/dbmock.go
|
||||
|
||||
# all gen targets should be added here and to gen/mark-fresh
|
||||
gen: \
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
agent/proto/agent.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
$(DB_GEN_FILES) \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
docs/admin/prometheus.md \
|
||||
@@ -472,25 +470,17 @@ gen: \
|
||||
.prettierignore \
|
||||
site/.prettierrc.yaml \
|
||||
site/.prettierignore \
|
||||
site/.eslintignore \
|
||||
site/e2e/provisionerGenerated.ts \
|
||||
site/src/theme/icons.json \
|
||||
examples/examples.gen.json \
|
||||
tailnet/tailnettest/coordinatormock.go \
|
||||
tailnet/tailnettest/coordinateemock.go \
|
||||
tailnet/tailnettest/multiagentmock.go
|
||||
site/.eslintignore
|
||||
.PHONY: gen
|
||||
|
||||
# Mark all generated files as fresh so make thinks they're up-to-date. This is
|
||||
# used during releases so we don't run generation scripts.
|
||||
gen/mark-fresh:
|
||||
files="\
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
agent/proto/agent.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
$(DB_GEN_FILES) \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
docs/admin/prometheus.md \
|
||||
@@ -502,12 +492,6 @@ gen/mark-fresh:
|
||||
site/.prettierrc.yaml \
|
||||
site/.prettierignore \
|
||||
site/.eslintignore \
|
||||
site/e2e/provisionerGenerated.ts \
|
||||
site/src/theme/icons.json \
|
||||
examples/examples.gen.json \
|
||||
tailnet/tailnettest/coordinatormock.go \
|
||||
tailnet/tailnettest/coordinateemock.go \
|
||||
tailnet/tailnettest/multiagentmock.go \
|
||||
"
|
||||
for file in $$files; do
|
||||
echo "$$file"
|
||||
@@ -527,33 +511,12 @@ coderd/database/dump.sql: coderd/database/gen/dump/main.go $(wildcard coderd/dat
|
||||
go run ./coderd/database/gen/dump/main.go
|
||||
|
||||
# Generates Go code for querying the database.
|
||||
# coderd/database/queries.sql.go
|
||||
# coderd/database/models.go
|
||||
coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $(wildcard coderd/database/queries/*.sql)
|
||||
./coderd/database/generate.sh
|
||||
|
||||
coderd/database/dbmock/dbmock.go: coderd/database/db.go coderd/database/querier.go
|
||||
go generate ./coderd/database/dbmock/
|
||||
|
||||
tailnet/tailnettest/coordinatormock.go tailnet/tailnettest/multiagentmock.go tailnet/tailnettest/coordinateemock.go: tailnet/coordinator.go tailnet/multiagent.go
|
||||
go generate ./tailnet/tailnettest/
|
||||
|
||||
tailnet/proto/tailnet.pb.go: tailnet/proto/tailnet.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./tailnet/proto/tailnet.proto
|
||||
|
||||
agent/proto/agent.pb.go: agent/proto/agent.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./agent/proto/agent.proto
|
||||
|
||||
provisionersdk/proto/provisioner.pb.go: provisionersdk/proto/provisioner.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
@@ -570,21 +533,10 @@ provisionerd/proto/provisionerd.pb.go: provisionerd/proto/provisionerd.proto
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./provisionerd/proto/provisionerd.proto
|
||||
|
||||
site/src/api/typesGenerated.ts: $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
go run ./scripts/apitypings/ > $@
|
||||
pnpm run format:write:only "$@"
|
||||
|
||||
site/e2e/provisionerGenerated.ts: provisionerd/proto/provisionerd.pb.go provisionersdk/proto/provisioner.pb.go
|
||||
site/src/api/typesGenerated.ts: scripts/apitypings/main.go $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
go run scripts/apitypings/main.go > site/src/api/typesGenerated.ts
|
||||
cd site
|
||||
../scripts/pnpm_install.sh
|
||||
pnpm run gen:provisioner
|
||||
|
||||
site/src/theme/icons.json: $(wildcard scripts/gensite/*) $(wildcard site/static/icon/*)
|
||||
go run ./scripts/gensite/ -icons "$@"
|
||||
pnpm run format:write:only "$@"
|
||||
|
||||
examples/examples.gen.json: scripts/examplegen/main.go examples/examples.go $(shell find ./examples/templates)
|
||||
go run ./scripts/examplegen/main.go > examples/examples.gen.json
|
||||
pnpm run format:types
|
||||
|
||||
coderd/rbac/object_gen.go: scripts/rbacgen/main.go coderd/rbac/object.go
|
||||
go run scripts/rbacgen/main.go ./coderd/rbac > coderd/rbac/object_gen.go
|
||||
@@ -593,11 +545,11 @@ docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/me
|
||||
go run scripts/metricsdocgen/main.go
|
||||
pnpm run format:write:only ./docs/admin/prometheus.md
|
||||
|
||||
docs/cli.md: scripts/clidocgen/main.go examples/examples.gen.json $(GO_SRC_FILES)
|
||||
CI=true BASE_PATH="." go run ./scripts/clidocgen
|
||||
docs/cli.md: scripts/clidocgen/main.go $(GO_SRC_FILES)
|
||||
BASE_PATH="." go run ./scripts/clidocgen
|
||||
pnpm run format:write:only ./docs/cli.md ./docs/cli/*.md ./docs/manifest.json
|
||||
|
||||
docs/admin/audit-logs.md: coderd/database/querier.go scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
docs/admin/audit-logs.md: scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
go run scripts/auditdocgen/main.go
|
||||
pnpm run format:write:only ./docs/admin/audit-logs.md
|
||||
|
||||
@@ -605,16 +557,7 @@ coderd/apidoc/swagger.json: $(shell find ./scripts/apidocgen $(FIND_EXCLUSIONS)
|
||||
./scripts/apidocgen/generate.sh
|
||||
pnpm run format:write:only ./docs/api ./docs/manifest.json ./coderd/apidoc/swagger.json
|
||||
|
||||
update-golden-files: \
|
||||
cli/testdata/.gen-golden \
|
||||
helm/coder/tests/testdata/.gen-golden \
|
||||
helm/provisioner/tests/testdata/.gen-golden \
|
||||
scripts/ci-report/testdata/.gen-golden \
|
||||
enterprise/cli/testdata/.gen-golden \
|
||||
enterprise/tailnet/testdata/.gen-golden \
|
||||
tailnet/testdata/.gen-golden \
|
||||
coderd/.gen-golden \
|
||||
provisioner/terraform/testdata/.gen-golden
|
||||
update-golden-files: cli/testdata/.gen-golden helm/coder/tests/testdata/.gen-golden helm/provisioner/tests/testdata/.gen-golden scripts/ci-report/testdata/.gen-golden enterprise/cli/testdata/.gen-golden
|
||||
.PHONY: update-golden-files
|
||||
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard cli/*_test.go)
|
||||
@@ -625,14 +568,6 @@ enterprise/cli/testdata/.gen-golden: $(wildcard enterprise/cli/testdata/*.golden
|
||||
go test ./enterprise/cli -run="TestEnterpriseCommandHelp" -update
|
||||
touch "$@"
|
||||
|
||||
tailnet/testdata/.gen-golden: $(wildcard tailnet/testdata/*.golden.html) $(GO_SRC_FILES) $(wildcard tailnet/*_test.go)
|
||||
go test ./tailnet -run="TestDebugTemplate" -update
|
||||
touch "$@"
|
||||
|
||||
enterprise/tailnet/testdata/.gen-golden: $(wildcard enterprise/tailnet/testdata/*.golden.html) $(GO_SRC_FILES) $(wildcard enterprise/tailnet/*_test.go)
|
||||
go test ./enterprise/tailnet -run="TestDebugTemplate" -update
|
||||
touch "$@"
|
||||
|
||||
helm/coder/tests/testdata/.gen-golden: $(wildcard helm/coder/tests/testdata/*.yaml) $(wildcard helm/coder/tests/testdata/*.golden) $(GO_SRC_FILES) $(wildcard helm/coder/tests/*_test.go)
|
||||
go test ./helm/coder/tests -run=TestUpdateGoldenFiles -update
|
||||
touch "$@"
|
||||
@@ -641,14 +576,6 @@ helm/provisioner/tests/testdata/.gen-golden: $(wildcard helm/provisioner/tests/t
|
||||
go test ./helm/provisioner/tests -run=TestUpdateGoldenFiles -update
|
||||
touch "$@"
|
||||
|
||||
coderd/.gen-golden: $(wildcard coderd/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard coderd/*_test.go)
|
||||
go test ./coderd -run="Test.*Golden$$" -update
|
||||
touch "$@"
|
||||
|
||||
provisioner/terraform/testdata/.gen-golden: $(wildcard provisioner/terraform/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard provisioner/terraform/*_test.go)
|
||||
go test ./provisioner/terraform -run="Test.*Golden$$" -update
|
||||
touch "$@"
|
||||
|
||||
scripts/ci-report/testdata/.gen-golden: $(wildcard scripts/ci-report/testdata/*) $(wildcard scripts/ci-report/*.go)
|
||||
go test ./scripts/ci-report -run=TestOutputMatchesGoldenFile -update
|
||||
touch "$@"
|
||||
@@ -667,7 +594,7 @@ site/.prettierrc.yaml: .prettierrc.yaml
|
||||
# - ./ -> ../
|
||||
# - ./site -> ./
|
||||
yq \
|
||||
'.overrides[].files |= map(. | sub("^./"; "") | sub("^"; "../") | sub("../site/"; "./") | sub("../!"; "!../"))' \
|
||||
'.overrides[].files |= map(. | sub("^./"; "") | sub("^"; "../") | sub("../site/"; "./"))' \
|
||||
"$<" >> "$@"
|
||||
|
||||
# Combine .gitignore with .prettierignore.include to generate .prettierignore.
|
||||
@@ -717,33 +644,6 @@ test:
|
||||
gotestsum --format standard-quiet -- -v -short -count=1 ./...
|
||||
.PHONY: test
|
||||
|
||||
# sqlc-cloud-is-setup will fail if no SQLc auth token is set. Use this as a
|
||||
# dependency for any sqlc-cloud related targets.
|
||||
sqlc-cloud-is-setup:
|
||||
if [[ "$(SQLC_AUTH_TOKEN)" == "" ]]; then
|
||||
echo "ERROR: 'SQLC_AUTH_TOKEN' must be set to auth with sqlc cloud before running verify." 1>&2
|
||||
exit 1
|
||||
fi
|
||||
.PHONY: sqlc-cloud-is-setup
|
||||
|
||||
sqlc-push: sqlc-cloud-is-setup test-postgres-docker
|
||||
echo "--- sqlc push"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc push -f coderd/database/sqlc.yaml && echo "Passed sqlc push"
|
||||
.PHONY: sqlc-push
|
||||
|
||||
sqlc-verify: sqlc-cloud-is-setup test-postgres-docker
|
||||
echo "--- sqlc verify"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc verify -f coderd/database/sqlc.yaml && echo "Passed sqlc verify"
|
||||
.PHONY: sqlc-verify
|
||||
|
||||
sqlc-vet: test-postgres-docker
|
||||
echo "--- sqlc vet"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc vet -f coderd/database/sqlc.yaml && echo "Passed sqlc vet"
|
||||
.PHONY: sqlc-vet
|
||||
|
||||
# When updating -timeout for this test, keep in sync with
|
||||
# test-go-postgres (.github/workflows/coder.yaml).
|
||||
# Do add coverage flags so that test caching works.
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
</a>
|
||||
|
||||
<h1>
|
||||
Self-Hosted Cloud Development Environments
|
||||
Self-Hosted Remote Development Environments
|
||||
</h1>
|
||||
|
||||
<a href="https://coder.com#gh-light-mode-only">
|
||||
@@ -31,9 +31,9 @@
|
||||
|
||||
</div>
|
||||
|
||||
[Coder](https://coder.com) enables organizations to set up development environments in their public or private cloud infrastructure. Cloud development environments are defined with Terraform, connected through a secure high-speed Wireguard® tunnel, and are automatically shut down when not in use to save on costs. Coder gives engineering teams the flexibility to use the cloud for workloads that are most beneficial to them.
|
||||
[Coder](https://coder.com) enables organizations to set up development environments in the cloud. Environments are defined with Terraform, connected through a secure high-speed Wireguard® tunnel, and are automatically shut down when not in use to save on costs. Coder gives engineering teams the flexibility to use the cloud for workloads that are most beneficial to them.
|
||||
|
||||
- Define cloud development environments in Terraform
|
||||
- Define development environments in Terraform
|
||||
- EC2 VMs, Kubernetes Pods, Docker Containers, etc.
|
||||
- Automatically shutdown idle resources to save on costs
|
||||
- Onboard developers in seconds instead of days
|
||||
@@ -44,7 +44,7 @@
|
||||
|
||||
## Quickstart
|
||||
|
||||
The most convenient way to try Coder is to install it on your local machine and experiment with provisioning cloud development environments using Docker (works on Linux, macOS, and Windows).
|
||||
The most convenient way to try Coder is to install it on your local machine and experiment with provisioning development environments using Docker (works on Linux, macOS, and Windows).
|
||||
|
||||
```
|
||||
# First, install Coder
|
||||
@@ -70,11 +70,11 @@ curl -L https://coder.com/install.sh | sh
|
||||
|
||||
You can run the install script with `--dry-run` to see the commands that will be used to install without executing them. You can modify the installation process by including flags. Run the install script with `--help` for reference.
|
||||
|
||||
> See [install](https://coder.com/docs/v2/latest/install) for additional methods.
|
||||
> See [install](docs/install) for additional methods.
|
||||
|
||||
Once installed, you can start a production deployment<sup>1</sup> with a single command:
|
||||
|
||||
```shell
|
||||
```console
|
||||
# Automatically sets up an external access URL on *.try.coder.app
|
||||
coder server
|
||||
|
||||
@@ -100,7 +100,7 @@ Browse our docs [here](https://coder.com/docs/v2) or visit a specific section be
|
||||
|
||||
Feel free to [open an issue](https://github.com/coder/coder/issues/new) if you have questions, run into bugs, or have a feature request.
|
||||
|
||||
[Join our Discord](https://discord.gg/coder) or [Slack](https://cdr.co/join-community) to provide feedback on in-progress features, and chat with the community using Coder!
|
||||
[Join our Discord](https://discord.gg/coder) to provide feedback on in-progress features, and chat with the community using Coder!
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
+39
-47
@@ -1,7 +1,7 @@
|
||||
# Coder Security
|
||||
|
||||
Coder welcomes feedback from security researchers and the general public to help
|
||||
improve our security. If you believe you have discovered a vulnerability,
|
||||
Coder welcomes feedback from security researchers and the general public
|
||||
to help improve our security. If you believe you have discovered a vulnerability,
|
||||
privacy issue, exposed data, or other security issues in any of our assets, we
|
||||
want to hear from you. This policy outlines steps for reporting vulnerabilities
|
||||
to us, what we expect, what you can expect from us.
|
||||
@@ -10,72 +10,64 @@ You can see the pretty version [here](https://coder.com/security/policy)
|
||||
|
||||
# Why Coder's security matters
|
||||
|
||||
If an attacker could fully compromise a Coder installation, they could spin up
|
||||
expensive workstations, steal valuable credentials, or steal proprietary source
|
||||
code. We take this risk very seriously and employ routine pen testing,
|
||||
vulnerability scanning, and code reviews. We also welcome the contributions from
|
||||
the community that helped make this product possible.
|
||||
If an attacker could fully compromise a Coder installation, they could spin
|
||||
up expensive workstations, steal valuable credentials, or steal proprietary
|
||||
source code. We take this risk very seriously and employ routine pen testing,
|
||||
vulnerability scanning, and code reviews. We also welcome the contributions
|
||||
from the community that helped make this product possible.
|
||||
|
||||
# Where should I report security issues?
|
||||
|
||||
Please report security issues to security@coder.com, providing all relevant
|
||||
information. The more details you provide, the easier it will be for us to
|
||||
triage and fix the issue.
|
||||
Please report security issues to security@coder.com, providing
|
||||
all relevant information. The more details you provide, the easier it will be
|
||||
for us to triage and fix the issue.
|
||||
|
||||
# Out of Scope
|
||||
|
||||
Our primary concern is around an abuse of the Coder application that allows an
|
||||
attacker to gain access to another users workspace, or spin up unwanted
|
||||
Our primary concern is around an abuse of the Coder application that allows
|
||||
an attacker to gain access to another users workspace, or spin up unwanted
|
||||
workspaces.
|
||||
|
||||
- DOS/DDOS attacks affecting availability --> While we do support rate limiting
|
||||
of requests, we primarily leave this to the owner of the Coder installation.
|
||||
Our rationale is that a DOS attack only affecting availability is not a
|
||||
valuable target for attackers.
|
||||
of requests, we primarily leave this to the owner of the Coder installation. Our
|
||||
rationale is that a DOS attack only affecting availability is not a valuable
|
||||
target for attackers.
|
||||
- Abuse of a compromised user credential --> If a user credential is compromised
|
||||
outside of the Coder ecosystem, then we consider it beyond the scope of our
|
||||
application. However, if an unprivileged user could escalate their permissions
|
||||
or gain access to another workspace, that is a cause for concern.
|
||||
outside of the Coder ecosystem, then we consider it beyond the scope of our application.
|
||||
However, if an unprivileged user could escalate their permissions or gain access
|
||||
to another workspace, that is a cause for concern.
|
||||
- Vulnerabilities in third party systems --> Vulnerabilities discovered in
|
||||
out-of-scope systems should be reported to the appropriate vendor or
|
||||
applicable authority.
|
||||
out-of-scope systems should be reported to the appropriate vendor or applicable authority.
|
||||
|
||||
# Our Commitments
|
||||
|
||||
When working with us, according to this policy, you can expect us to:
|
||||
|
||||
- Respond to your report promptly, and work with you to understand and validate
|
||||
your report;
|
||||
- Strive to keep you informed about the progress of a vulnerability as it is
|
||||
processed;
|
||||
- Work to remediate discovered vulnerabilities in a timely manner, within our
|
||||
operational constraints; and
|
||||
- Extend Safe Harbor for your vulnerability research that is related to this
|
||||
policy.
|
||||
- Respond to your report promptly, and work with you to understand and validate your report;
|
||||
- Strive to keep you informed about the progress of a vulnerability as it is processed;
|
||||
- Work to remediate discovered vulnerabilities in a timely manner, within our operational constraints; and
|
||||
- Extend Safe Harbor for your vulnerability research that is related to this policy.
|
||||
|
||||
# Our Expectations
|
||||
|
||||
In participating in our vulnerability disclosure program in good faith, we ask
|
||||
that you:
|
||||
In participating in our vulnerability disclosure program in good faith, we ask that you:
|
||||
|
||||
- Play by the rules, including following this policy and any other relevant
|
||||
agreements. If there is any inconsistency between this policy and any other
|
||||
applicable terms, the terms of this policy will prevail;
|
||||
- Play by the rules, including following this policy and any other relevant agreements.
|
||||
If there is any inconsistency between this policy and any other applicable terms, the
|
||||
terms of this policy will prevail;
|
||||
- Report any vulnerability you’ve discovered promptly;
|
||||
- Avoid violating the privacy of others, disrupting our systems, destroying
|
||||
data, and/or harming user experience;
|
||||
- Avoid violating the privacy of others, disrupting our systems, destroying data, and/or
|
||||
harming user experience;
|
||||
- Use only the Official Channels to discuss vulnerability information with us;
|
||||
- Provide us a reasonable amount of time (at least 90 days from the initial
|
||||
report) to resolve the issue before you disclose it publicly;
|
||||
- Perform testing only on in-scope systems, and respect systems and activities
|
||||
which are out-of-scope;
|
||||
- If a vulnerability provides unintended access to data: Limit the amount of
|
||||
data you access to the minimum required for effectively demonstrating a Proof
|
||||
of Concept; and cease testing and submit a report immediately if you encounter
|
||||
any user data during testing, such as Personally Identifiable Information
|
||||
(PII), Personal Healthcare Information (PHI), credit card data, or proprietary
|
||||
information;
|
||||
- You should only interact with test accounts you own or with explicit
|
||||
permission from
|
||||
- Provide us a reasonable amount of time (at least 90 days from the initial report) to
|
||||
resolve the issue before you disclose it publicly;
|
||||
- Perform testing only on in-scope systems, and respect systems and activities which
|
||||
are out-of-scope;
|
||||
- If a vulnerability provides unintended access to data: Limit the amount of data you
|
||||
access to the minimum required for effectively demonstrating a Proof of Concept; and
|
||||
cease testing and submit a report immediately if you encounter any user data during testing,
|
||||
such as Personally Identifiable Information (PII), Personal Healthcare Information (PHI),
|
||||
credit card data, or proprietary information;
|
||||
- You should only interact with test accounts you own or with explicit permission from
|
||||
- the account holder; and
|
||||
- Do not engage in extortion.
|
||||
|
||||
+346
-519
File diff suppressed because it is too large
Load Diff
+543
-672
File diff suppressed because it is too large
Load Diff
@@ -1,5 +0,0 @@
|
||||
// Package agentproctest contains utility functions
|
||||
// for testing process management in the agent.
|
||||
package agentproctest
|
||||
|
||||
//go:generate mockgen -destination ./syscallermock.go -package agentproctest github.com/coder/coder/v2/agent/agentproc Syscaller
|
||||
@@ -1,49 +0,0 @@
|
||||
package agentproctest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
)
|
||||
|
||||
func GenerateProcess(t *testing.T, fs afero.Fs, muts ...func(*agentproc.Process)) agentproc.Process {
|
||||
t.Helper()
|
||||
|
||||
pid, err := cryptorand.Intn(1<<31 - 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
arg1, err := cryptorand.String(5)
|
||||
require.NoError(t, err)
|
||||
|
||||
arg2, err := cryptorand.String(5)
|
||||
require.NoError(t, err)
|
||||
|
||||
arg3, err := cryptorand.String(5)
|
||||
require.NoError(t, err)
|
||||
|
||||
cmdline := fmt.Sprintf("%s\x00%s\x00%s", arg1, arg2, arg3)
|
||||
|
||||
process := agentproc.Process{
|
||||
CmdLine: cmdline,
|
||||
PID: int32(pid),
|
||||
}
|
||||
|
||||
for _, mut := range muts {
|
||||
mut(&process)
|
||||
}
|
||||
|
||||
process.Dir = fmt.Sprintf("%s/%d", "/proc", process.PID)
|
||||
|
||||
err = fs.MkdirAll(process.Dir, 0o555)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = afero.WriteFile(fs, fmt.Sprintf("%s/cmdline", process.Dir), []byte(process.CmdLine), 0o444)
|
||||
require.NoError(t, err)
|
||||
|
||||
return process
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/coder/coder/v2/agent/agentproc (interfaces: Syscaller)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination ./syscallermock.go -package agentproctest github.com/coder/coder/v2/agent/agentproc Syscaller
|
||||
//
|
||||
|
||||
// Package agentproctest is a generated GoMock package.
|
||||
package agentproctest
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
syscall "syscall"
|
||||
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// MockSyscaller is a mock of Syscaller interface.
|
||||
type MockSyscaller struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockSyscallerMockRecorder
|
||||
}
|
||||
|
||||
// MockSyscallerMockRecorder is the mock recorder for MockSyscaller.
|
||||
type MockSyscallerMockRecorder struct {
|
||||
mock *MockSyscaller
|
||||
}
|
||||
|
||||
// NewMockSyscaller creates a new mock instance.
|
||||
func NewMockSyscaller(ctrl *gomock.Controller) *MockSyscaller {
|
||||
mock := &MockSyscaller{ctrl: ctrl}
|
||||
mock.recorder = &MockSyscallerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockSyscaller) EXPECT() *MockSyscallerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// GetPriority mocks base method.
|
||||
func (m *MockSyscaller) GetPriority(arg0 int32) (int, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetPriority", arg0)
|
||||
ret0, _ := ret[0].(int)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetPriority indicates an expected call of GetPriority.
|
||||
func (mr *MockSyscallerMockRecorder) GetPriority(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPriority", reflect.TypeOf((*MockSyscaller)(nil).GetPriority), arg0)
|
||||
}
|
||||
|
||||
// Kill mocks base method.
|
||||
func (m *MockSyscaller) Kill(arg0 int32, arg1 syscall.Signal) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Kill", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Kill indicates an expected call of Kill.
|
||||
func (mr *MockSyscallerMockRecorder) Kill(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kill", reflect.TypeOf((*MockSyscaller)(nil).Kill), arg0, arg1)
|
||||
}
|
||||
|
||||
// SetPriority mocks base method.
|
||||
func (m *MockSyscaller) SetPriority(arg0 int32, arg1 int) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SetPriority", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SetPriority indicates an expected call of SetPriority.
|
||||
func (mr *MockSyscallerMockRecorder) SetPriority(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPriority", reflect.TypeOf((*MockSyscaller)(nil).SetPriority), arg0, arg1)
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
// Package agentproc contains logic for interfacing with local
|
||||
// processes running in the same context as the agent.
|
||||
package agentproc
|
||||
@@ -1,24 +0,0 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func (*Process) Niceness(Syscaller) (int, error) {
|
||||
return 0, errUnimplemented
|
||||
}
|
||||
|
||||
func (*Process) SetNiceness(Syscaller, int) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
|
||||
func (*Process) Cmd() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func List(afero.Fs, Syscaller) ([]*Process, error) {
|
||||
return nil, errUnimplemented
|
||||
}
|
||||
@@ -1,166 +0,0 @@
|
||||
package agentproc_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/agent/agentproc/agentproctest"
|
||||
)
|
||||
|
||||
func TestList(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skipf("skipping non-linux environment")
|
||||
}
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
fs = afero.NewMemMapFs()
|
||||
sc = agentproctest.NewMockSyscaller(gomock.NewController(t))
|
||||
expectedProcs = make(map[int32]agentproc.Process)
|
||||
)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
proc := agentproctest.GenerateProcess(t, fs)
|
||||
expectedProcs[proc.PID] = proc
|
||||
|
||||
sc.EXPECT().
|
||||
Kill(proc.PID, syscall.Signal(0)).
|
||||
Return(nil)
|
||||
}
|
||||
|
||||
actualProcs, err := agentproc.List(fs, sc)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, actualProcs, len(expectedProcs))
|
||||
for _, proc := range actualProcs {
|
||||
expected, ok := expectedProcs[proc.PID]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, expected.PID, proc.PID)
|
||||
require.Equal(t, expected.CmdLine, proc.CmdLine)
|
||||
require.Equal(t, expected.Dir, proc.Dir)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("FinishedProcess", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
fs = afero.NewMemMapFs()
|
||||
sc = agentproctest.NewMockSyscaller(gomock.NewController(t))
|
||||
expectedProcs = make(map[int32]agentproc.Process)
|
||||
)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
proc := agentproctest.GenerateProcess(t, fs)
|
||||
expectedProcs[proc.PID] = proc
|
||||
|
||||
sc.EXPECT().
|
||||
Kill(proc.PID, syscall.Signal(0)).
|
||||
Return(nil)
|
||||
}
|
||||
|
||||
// Create a process that's already finished. We're not adding
|
||||
// it to the map because it should be skipped over.
|
||||
proc := agentproctest.GenerateProcess(t, fs)
|
||||
sc.EXPECT().
|
||||
Kill(proc.PID, syscall.Signal(0)).
|
||||
Return(xerrors.New("os: process already finished"))
|
||||
|
||||
actualProcs, err := agentproc.List(fs, sc)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, actualProcs, len(expectedProcs))
|
||||
for _, proc := range actualProcs {
|
||||
expected, ok := expectedProcs[proc.PID]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, expected.PID, proc.PID)
|
||||
require.Equal(t, expected.CmdLine, proc.CmdLine)
|
||||
require.Equal(t, expected.Dir, proc.Dir)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("NoSuchProcess", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
fs = afero.NewMemMapFs()
|
||||
sc = agentproctest.NewMockSyscaller(gomock.NewController(t))
|
||||
expectedProcs = make(map[int32]agentproc.Process)
|
||||
)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
proc := agentproctest.GenerateProcess(t, fs)
|
||||
expectedProcs[proc.PID] = proc
|
||||
|
||||
sc.EXPECT().
|
||||
Kill(proc.PID, syscall.Signal(0)).
|
||||
Return(nil)
|
||||
}
|
||||
|
||||
// Create a process that doesn't exist. We're not adding
|
||||
// it to the map because it should be skipped over.
|
||||
proc := agentproctest.GenerateProcess(t, fs)
|
||||
sc.EXPECT().
|
||||
Kill(proc.PID, syscall.Signal(0)).
|
||||
Return(syscall.ESRCH)
|
||||
|
||||
actualProcs, err := agentproc.List(fs, sc)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, actualProcs, len(expectedProcs))
|
||||
for _, proc := range actualProcs {
|
||||
expected, ok := expectedProcs[proc.PID]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, expected.PID, proc.PID)
|
||||
require.Equal(t, expected.CmdLine, proc.CmdLine)
|
||||
require.Equal(t, expected.Dir, proc.Dir)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// These tests are not very interesting but they provide some modicum of
|
||||
// confidence.
|
||||
func TestProcess(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skipf("skipping non-linux environment")
|
||||
}
|
||||
|
||||
t.Run("SetNiceness", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
sc = agentproctest.NewMockSyscaller(gomock.NewController(t))
|
||||
proc = &agentproc.Process{
|
||||
PID: 32,
|
||||
}
|
||||
score = 20
|
||||
)
|
||||
|
||||
sc.EXPECT().SetPriority(proc.PID, score).Return(nil)
|
||||
err := proc.SetNiceness(sc, score)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Cmd", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
proc = &agentproc.Process{
|
||||
CmdLine: "helloworld\x00--arg1\x00--arg2",
|
||||
}
|
||||
expectedName = "helloworld --arg1 --arg2"
|
||||
)
|
||||
|
||||
require.Equal(t, expectedName, proc.Cmd())
|
||||
})
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) {
|
||||
d, err := fs.Open(defaultProcDir)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("open dir %q: %w", defaultProcDir, err)
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
entries, err := d.Readdirnames(0)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("readdirnames: %w", err)
|
||||
}
|
||||
|
||||
processes := make([]*Process, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
pid, err := strconv.ParseInt(entry, 10, 32)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that the process still exists.
|
||||
exists, err := isProcessExist(syscaller, int32(pid))
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("check process exists: %w", err)
|
||||
}
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
cmdline, err := afero.ReadFile(fs, filepath.Join(defaultProcDir, entry, "cmdline"))
|
||||
if err != nil {
|
||||
var errNo syscall.Errno
|
||||
if xerrors.As(err, &errNo) && errNo == syscall.EPERM {
|
||||
continue
|
||||
}
|
||||
return nil, xerrors.Errorf("read cmdline: %w", err)
|
||||
}
|
||||
processes = append(processes, &Process{
|
||||
PID: int32(pid),
|
||||
CmdLine: string(cmdline),
|
||||
Dir: filepath.Join(defaultProcDir, entry),
|
||||
})
|
||||
}
|
||||
|
||||
return processes, nil
|
||||
}
|
||||
|
||||
func isProcessExist(syscaller Syscaller, pid int32) (bool, error) {
|
||||
err := syscaller.Kill(pid, syscall.Signal(0))
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err.Error() == "os: process already finished" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var errno syscall.Errno
|
||||
if !errors.As(err, &errno) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch errno {
|
||||
case syscall.ESRCH:
|
||||
return false, nil
|
||||
case syscall.EPERM:
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, xerrors.Errorf("kill: %w", err)
|
||||
}
|
||||
|
||||
func (p *Process) Niceness(sc Syscaller) (int, error) {
|
||||
nice, err := sc.GetPriority(p.PID)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("get priority for %q: %w", p.CmdLine, err)
|
||||
}
|
||||
return nice, nil
|
||||
}
|
||||
|
||||
func (p *Process) SetNiceness(sc Syscaller, score int) error {
|
||||
err := sc.SetPriority(p.PID, score)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("set priority for %q: %w", p.CmdLine, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Process) Cmd() string {
|
||||
return strings.Join(p.cmdLine(), " ")
|
||||
}
|
||||
|
||||
func (p *Process) cmdLine() []string {
|
||||
return strings.Split(p.CmdLine, "\x00")
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type Syscaller interface {
|
||||
SetPriority(pid int32, priority int) error
|
||||
GetPriority(pid int32) (int, error)
|
||||
Kill(pid int32, sig syscall.Signal) error
|
||||
}
|
||||
|
||||
// nolint: unused // used on some but no all platforms
|
||||
const defaultProcDir = "/proc"
|
||||
|
||||
type Process struct {
|
||||
Dir string
|
||||
CmdLine string
|
||||
PID int32
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func NewSyscaller() Syscaller {
|
||||
return nopSyscaller{}
|
||||
}
|
||||
|
||||
var errUnimplemented = xerrors.New("unimplemented")
|
||||
|
||||
type nopSyscaller struct{}
|
||||
|
||||
func (nopSyscaller) SetPriority(int32, int) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
|
||||
func (nopSyscaller) GetPriority(int32) (int, error) {
|
||||
return 0, errUnimplemented
|
||||
}
|
||||
|
||||
func (nopSyscaller) Kill(int32, syscall.Signal) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package agentproc
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func NewSyscaller() Syscaller {
|
||||
return UnixSyscaller{}
|
||||
}
|
||||
|
||||
type UnixSyscaller struct{}
|
||||
|
||||
func (UnixSyscaller) SetPriority(pid int32, nice int) error {
|
||||
err := unix.Setpriority(unix.PRIO_PROCESS, int(pid), nice)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("set priority: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (UnixSyscaller) GetPriority(pid int32) (int, error) {
|
||||
nice, err := unix.Getpriority(0, int(pid))
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("get priority: %w", err)
|
||||
}
|
||||
return nice, nil
|
||||
}
|
||||
|
||||
func (UnixSyscaller) Kill(pid int32, sig syscall.Signal) error {
|
||||
err := syscall.Kill(int(pid), sig)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("kill: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,357 +0,0 @@
|
||||
package agentscripts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/robfig/cron/v3"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTimeout is returned when a script times out.
|
||||
ErrTimeout = xerrors.New("script timed out")
|
||||
// ErrOutputPipesOpen is returned when a script exits leaving the output
|
||||
// pipe(s) (stdout, stderr) open. This happens because we set WaitDelay on
|
||||
// the command, which gives us two things:
|
||||
//
|
||||
// 1. The ability to ensure that a script exits (this is important for e.g.
|
||||
// blocking login, and avoiding doing so indefinitely)
|
||||
// 2. Improved command cancellation on timeout
|
||||
ErrOutputPipesOpen = xerrors.New("script exited without closing output pipes")
|
||||
|
||||
parser = cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.DowOptional)
|
||||
)
|
||||
|
||||
// Options are a set of options for the runner.
|
||||
type Options struct {
|
||||
LogDir string
|
||||
Logger slog.Logger
|
||||
SSHServer *agentssh.Server
|
||||
Filesystem afero.Fs
|
||||
PatchLogs func(ctx context.Context, req agentsdk.PatchLogs) error
|
||||
}
|
||||
|
||||
// New creates a runner for the provided scripts.
|
||||
func New(opts Options) *Runner {
|
||||
cronCtx, cronCtxCancel := context.WithCancel(context.Background())
|
||||
return &Runner{
|
||||
Options: opts,
|
||||
cronCtx: cronCtx,
|
||||
cronCtxCancel: cronCtxCancel,
|
||||
cron: cron.New(cron.WithParser(parser)),
|
||||
closed: make(chan struct{}),
|
||||
scriptsExecuted: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "scripts",
|
||||
Name: "executed_total",
|
||||
}, []string{"success"}),
|
||||
}
|
||||
}
|
||||
|
||||
type Runner struct {
|
||||
Options
|
||||
|
||||
cronCtx context.Context
|
||||
cronCtxCancel context.CancelFunc
|
||||
cmdCloseWait sync.WaitGroup
|
||||
closed chan struct{}
|
||||
closeMutex sync.Mutex
|
||||
cron *cron.Cron
|
||||
initialized atomic.Bool
|
||||
scripts []codersdk.WorkspaceAgentScript
|
||||
|
||||
// scriptsExecuted includes all scripts executed by the workspace agent. Agents
|
||||
// execute startup scripts, and scripts on a cron schedule. Both will increment
|
||||
// this counter.
|
||||
scriptsExecuted *prometheus.CounterVec
|
||||
}
|
||||
|
||||
func (r *Runner) RegisterMetrics(reg prometheus.Registerer) {
|
||||
if reg == nil {
|
||||
// If no registry, do nothing.
|
||||
return
|
||||
}
|
||||
reg.MustRegister(r.scriptsExecuted)
|
||||
}
|
||||
|
||||
// Init initializes the runner with the provided scripts.
|
||||
// It also schedules any scripts that have a schedule.
|
||||
// This function must be called before Execute.
|
||||
func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript) error {
|
||||
if r.initialized.Load() {
|
||||
return xerrors.New("init: already initialized")
|
||||
}
|
||||
r.initialized.Store(true)
|
||||
r.scripts = scripts
|
||||
r.Logger.Info(r.cronCtx, "initializing agent scripts", slog.F("script_count", len(scripts)), slog.F("log_dir", r.LogDir))
|
||||
|
||||
for _, script := range scripts {
|
||||
if script.Cron == "" {
|
||||
continue
|
||||
}
|
||||
script := script
|
||||
_, err := r.cron.AddFunc(script.Cron, func() {
|
||||
err := r.trackRun(r.cronCtx, script)
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "run agent script on schedule", slog.Error(err))
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("add schedule: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartCron starts the cron scheduler.
|
||||
// This is done async to allow for the caller to execute scripts prior.
|
||||
func (r *Runner) StartCron() {
|
||||
// cron.Start() and cron.Stop() does not guarantee that the cron goroutine
|
||||
// has exited by the time the `cron.Stop()` context returns, so we need to
|
||||
// track it manually.
|
||||
err := r.trackCommandGoroutine(func() {
|
||||
// Since this is run async, in quick unit tests, it is possible the
|
||||
// Close() function gets called before we even start the cron.
|
||||
// In these cases, the Run() will never end.
|
||||
// So if we are closed, we just return, and skip the Run() entirely.
|
||||
select {
|
||||
case <-r.cronCtx.Done():
|
||||
// The cronCtx is canceled before cron.Close() happens. So if the ctx is
|
||||
// canceled, then Close() will be called, or it is about to be called.
|
||||
// So do nothing!
|
||||
default:
|
||||
r.cron.Run()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "start cron failed", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Execute runs a set of scripts according to a filter.
|
||||
func (r *Runner) Execute(ctx context.Context, filter func(script codersdk.WorkspaceAgentScript) bool) error {
|
||||
if filter == nil {
|
||||
// Execute em' all!
|
||||
filter = func(script codersdk.WorkspaceAgentScript) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
var eg errgroup.Group
|
||||
for _, script := range r.scripts {
|
||||
if !filter(script) {
|
||||
continue
|
||||
}
|
||||
script := script
|
||||
eg.Go(func() error {
|
||||
err := r.trackRun(ctx, script)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run agent script %q: %w", script.LogSourceID, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// trackRun wraps "run" with metrics.
|
||||
func (r *Runner) trackRun(ctx context.Context, script codersdk.WorkspaceAgentScript) error {
|
||||
err := r.run(ctx, script)
|
||||
if err != nil {
|
||||
r.scriptsExecuted.WithLabelValues("false").Add(1)
|
||||
} else {
|
||||
r.scriptsExecuted.WithLabelValues("true").Add(1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// run executes the provided script with the timeout.
// If the timeout is exceeded, the process is sent an interrupt signal.
// If the process does not exit after a few seconds, it is forcefully killed.
// This function immediately returns after a timeout, and does not wait for the process to exit.
func (r *Runner) run(ctx context.Context, script codersdk.WorkspaceAgentScript) error {
	// Derive a log file path: explicit path, else one named after the source.
	logPath := script.LogPath
	if logPath == "" {
		logPath = fmt.Sprintf("coder-script-%s.log", script.LogSourceID)
	}
	// Expand a leading "~" to the user's home directory.
	// NOTE(review): this only handles a bare "~" prefix; a path like
	// "~alice/x" would be joined under the *current* user's home — confirm
	// that callers never pass "~user" forms.
	if logPath[0] == '~' {
		// First we check the environment.
		homeDir, err := os.UserHomeDir()
		if err != nil {
			// Fall back to the OS user database if $HOME is unset.
			u, err := user.Current()
			if err != nil {
				return xerrors.Errorf("current user: %w", err)
			}
			homeDir = u.HomeDir
		}
		logPath = filepath.Join(homeDir, logPath[1:])
	}
	// Expand $VAR references, then anchor relative paths under LogDir.
	logPath = os.ExpandEnv(logPath)
	if !filepath.IsAbs(logPath) {
		logPath = filepath.Join(r.LogDir, logPath)
	}
	logger := r.Logger.With(slog.F("log_path", logPath))
	logger.Info(ctx, "running agent script", slog.F("script", script.Script))

	// NOTE(review): O_RDWR without O_TRUNC/O_APPEND overwrites an existing
	// file from offset 0; a longer previous log would leave stale trailing
	// bytes — confirm this is intended.
	fileWriter, err := r.Filesystem.OpenFile(logPath, os.O_CREATE|os.O_RDWR, 0o600)
	if err != nil {
		return xerrors.Errorf("open %s script log file: %w", logPath, err)
	}
	defer func() {
		err := fileWriter.Close()
		if err != nil {
			logger.Warn(ctx, fmt.Sprintf("close %s script log file", logPath), slog.Error(err))
		}
	}()

	var cmd *exec.Cmd
	// Bound the command's lifetime with the script timeout, when one is set.
	cmdCtx := ctx
	if script.Timeout > 0 {
		var ctxCancel context.CancelFunc
		cmdCtx, ctxCancel = context.WithTimeout(ctx, script.Timeout)
		defer ctxCancel()
	}
	cmdPty, err := r.SSHServer.CreateCommand(cmdCtx, script.Script, nil)
	if err != nil {
		return xerrors.Errorf("%s script: create command: %w", logPath, err)
	}
	cmd = cmdPty.AsExec()
	// OS-specific attributes (e.g. its own session on unix) plus a graceful
	// cancel: Cancel signals the process, then WaitDelay gives it 10s to
	// exit before exec forcefully kills it.
	cmd.SysProcAttr = cmdSysProcAttr()
	cmd.WaitDelay = 10 * time.Second
	cmd.Cancel = cmdCancel(cmd)

	send, flushAndClose := agentsdk.LogsSender(script.LogSourceID, r.PatchLogs, logger)
	// If ctx is canceled here (or in a writer below), we may be
	// discarding logs, but that's okay because we're shutting down
	// anyway. We could consider creating a new context here if we
	// want better control over flush during shutdown.
	defer func() {
		if err := flushAndClose(ctx); err != nil {
			logger.Warn(ctx, "flush startup logs failed", slog.Error(err))
		}
	}()

	// Mirror stdout/stderr both to the on-disk log file and to the remote
	// log sink at the appropriate level.
	infoW := agentsdk.LogsWriter(ctx, send, script.LogSourceID, codersdk.LogLevelInfo)
	defer infoW.Close()
	errW := agentsdk.LogsWriter(ctx, send, script.LogSourceID, codersdk.LogLevelError)
	defer errW.Close()
	cmd.Stdout = io.MultiWriter(fileWriter, infoW)
	cmd.Stderr = io.MultiWriter(fileWriter, errW)

	// Log outcome + duration + exit code on the way out; reads the named
	// result-like `err` variable as it stands at return time.
	start := time.Now()
	defer func() {
		end := time.Now()
		execTime := end.Sub(start)
		exitCode := 0
		if err != nil {
			exitCode = 255 // Unknown status.
			var exitError *exec.ExitError
			if xerrors.As(err, &exitError) {
				exitCode = exitError.ExitCode()
			}
			logger.Warn(ctx, fmt.Sprintf("%s script failed", logPath), slog.F("execution_time", execTime), slog.F("exit_code", exitCode), slog.Error(err))
		} else {
			logger.Info(ctx, fmt.Sprintf("%s script completed", logPath), slog.F("execution_time", execTime), slog.F("exit_code", exitCode))
		}
	}()

	err = cmd.Start()
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ErrTimeout
		}
		return xerrors.Errorf("%s script: start command: %w", logPath, err)
	}

	// Wait for the command on a tracked goroutine so Close() can drain it.
	cmdDone := make(chan error, 1)
	err = r.trackCommandGoroutine(func() {
		cmdDone <- cmd.Wait()
	})
	if err != nil {
		return xerrors.Errorf("%s script: track command goroutine: %w", logPath, err)
	}
	select {
	case <-cmdCtx.Done():
		// Wait for the command to drain!
		select {
		case <-cmdDone:
		case <-time.After(10 * time.Second):
		}
		err = cmdCtx.Err()
	case err = <-cmdDone:
	}
	// Translate exec/context sentinels into package-level errors.
	switch {
	case errors.Is(err, exec.ErrWaitDelay):
		err = ErrOutputPipesOpen
		message := fmt.Sprintf("script exited successfully, but output pipes were not closed after %s", cmd.WaitDelay)
		details := fmt.Sprint(
			"This usually means a child process was started with references to stdout or stderr. As a result, this " +
				"process may now have been terminated. Consider redirecting the output or using a separate " +
				"\"coder_script\" for the process, see " +
				"https://coder.com/docs/v2/latest/templates/troubleshooting#startup-script-issues for more information.",
		)
		// Inform the user by propagating the message via log writers.
		_, _ = fmt.Fprintf(cmd.Stderr, "WARNING: %s. %s\n", message, details)
		// Also log to agent logs for ease of debugging.
		r.Logger.Warn(ctx, message, slog.F("details", details), slog.Error(err))
	case errors.Is(err, context.DeadlineExceeded):
		err = ErrTimeout
	}
	return err
}
|
||||
|
||||
// Close shuts down the runner. It is idempotent: the first call marks the
// runner closed, cancels the cron context, stops the cron scheduler, and
// waits for every goroutine registered via trackCommandGoroutine; later
// calls return nil immediately.
func (r *Runner) Close() error {
	r.closeMutex.Lock()
	defer r.closeMutex.Unlock()
	if r.isClosed() {
		return nil
	}
	close(r.closed)
	// Must cancel the cron ctx BEFORE stopping the cron.
	r.cronCtxCancel()
	// NOTE(review): assumes cron.Stop() returns a context that is done once
	// in-flight jobs finish — confirm against the cron library in use.
	<-r.cron.Stop().Done()
	// Block until all tracked command goroutines (cmd.Wait calls) return.
	r.cmdCloseWait.Wait()
	return nil
}
|
||||
|
||||
// trackCommandGoroutine runs fn on a goroutine registered with cmdCloseWait
// so that Close can wait for it. It returns an error (and does not start
// the goroutine) if the runner is already closed.
func (r *Runner) trackCommandGoroutine(fn func()) error {
	r.closeMutex.Lock()
	defer r.closeMutex.Unlock()
	if r.isClosed() {
		return xerrors.New("track command goroutine: closed")
	}
	// Add while holding closeMutex so Close cannot slip in between the
	// closed check and the wait-group registration.
	r.cmdCloseWait.Add(1)
	go func() {
		// Done via defer so the wait group is released even if fn panics.
		defer r.cmdCloseWait.Done()
		fn()
	}()
	return nil
}
|
||||
|
||||
func (r *Runner) isClosed() bool {
|
||||
select {
|
||||
case <-r.closed:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package agentscripts
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// cmdSysProcAttr returns process attributes that start the command in its
// own session (Setsid), which also makes it the leader of a new process
// group — the group that cmdCancel later signals.
func cmdSysProcAttr() *syscall.SysProcAttr {
	attr := new(syscall.SysProcAttr)
	attr.Setsid = true
	return attr
}
|
||||
|
||||
// cmdCancel returns the function installed as cmd.Cancel: it sends SIGHUP
// to the command's entire process group (the negative pid addresses the
// group, not just the single process).
func cmdCancel(cmd *exec.Cmd) func() error {
	return func() error {
		return syscall.Kill(-cmd.Process.Pid, syscall.SIGHUP)
	}
}
|
||||
@@ -1,89 +0,0 @@
|
||||
package agentscripts_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/goleak"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentscripts"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
// TestMain enforces that no test in this package leaks goroutines.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
|
||||
|
||||
func TestExecuteBasic(t *testing.T) {
|
||||
t.Parallel()
|
||||
logs := make(chan agentsdk.PatchLogs, 1)
|
||||
runner := setup(t, func(ctx context.Context, req agentsdk.PatchLogs) error {
|
||||
logs <- req
|
||||
return nil
|
||||
})
|
||||
defer runner.Close()
|
||||
err := runner.Init([]codersdk.WorkspaceAgentScript{{
|
||||
Script: "echo hello",
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, runner.Execute(context.Background(), func(script codersdk.WorkspaceAgentScript) bool {
|
||||
return true
|
||||
}))
|
||||
log := <-logs
|
||||
require.Equal(t, "hello", log.Logs[0].Output)
|
||||
}
|
||||
|
||||
func TestTimeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
runner := setup(t, nil)
|
||||
defer runner.Close()
|
||||
err := runner.Init([]codersdk.WorkspaceAgentScript{{
|
||||
Script: "sleep infinity",
|
||||
Timeout: time.Millisecond,
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.ErrorIs(t, runner.Execute(context.Background(), nil), agentscripts.ErrTimeout)
|
||||
}
|
||||
|
||||
// TestCronClose exists because cron.Run() can happen after cron.Close().
// If this happens, there used to be a deadlock.
func TestCronClose(t *testing.T) {
	t.Parallel()
	// A zero-value Options runner is enough to exercise the cron
	// start/stop interaction; no scripts are registered.
	runner := agentscripts.New(agentscripts.Options{})
	runner.StartCron()
	require.NoError(t, runner.Close(), "close runner")
}
|
||||
|
||||
// setup constructs a Runner for tests, backed by an in-memory filesystem
// and a locally started SSH server. A nil patchLogs is replaced with a
// no-op callback. The SSH server is closed via t.Cleanup; the returned
// runner must be closed by the caller.
func setup(t *testing.T, patchLogs func(ctx context.Context, req agentsdk.PatchLogs) error) *agentscripts.Runner {
	t.Helper()
	if patchLogs == nil {
		// noop
		patchLogs = func(ctx context.Context, req agentsdk.PatchLogs) error {
			return nil
		}
	}
	fs := afero.NewMemMapFs()
	logger := slogtest.Make(t, nil)
	s, err := agentssh.NewServer(context.Background(), logger, prometheus.NewRegistry(), fs, 0, "")
	require.NoError(t, err)
	// Stub out the token/manifest providers the server would normally get
	// from the agent.
	s.AgentToken = func() string { return "" }
	s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
	t.Cleanup(func() {
		_ = s.Close()
	})
	return agentscripts.New(agentscripts.Options{
		LogDir:     t.TempDir(),
		Logger:     logger,
		SSHServer:  s,
		Filesystem: fs,
		PatchLogs:  patchLogs,
	})
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package agentscripts
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// cmdSysProcAttr returns zero-value process attributes; presumably this is
// the Windows counterpart of the unix variant, where no session or process
// group configuration is applied.
func cmdSysProcAttr() *syscall.SysProcAttr {
	return new(syscall.SysProcAttr)
}
|
||||
|
||||
// cmdCancel returns the function installed as cmd.Cancel: it delivers
// os.Interrupt to the process, giving the script a chance to exit
// gracefully before the caller's WaitDelay forces termination.
func cmdCancel(cmd *exec.Cmd) func() error {
	return func() error {
		return cmd.Process.Signal(os.Interrupt)
	}
}
|
||||
+42
-159
@@ -19,8 +19,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/google/uuid"
|
||||
"github.com/kballard/go-shellquote"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
@@ -30,10 +28,10 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/agent/usershell"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/pty"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -47,12 +45,8 @@ const (
|
||||
MagicSessionTypeEnvironmentVariable = "CODER_SSH_SESSION_TYPE"
|
||||
// MagicSessionTypeVSCode is set in the SSH config by the VS Code extension to identify itself.
|
||||
MagicSessionTypeVSCode = "vscode"
|
||||
// MagicSessionTypeJetBrains is set in the SSH config by the JetBrains
|
||||
// extension to identify itself.
|
||||
// MagicSessionTypeJetBrains is set in the SSH config by the JetBrains extension to identify itself.
|
||||
MagicSessionTypeJetBrains = "jetbrains"
|
||||
// MagicProcessCmdlineJetBrains is a string in a process's command line that
|
||||
// uniquely identifies it as JetBrains software.
|
||||
MagicProcessCmdlineJetBrains = "idea.vendor.name=JetBrains"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
@@ -99,7 +93,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
}
|
||||
|
||||
forwardHandler := &ssh.ForwardedTCPHandler{}
|
||||
unixForwardHandler := newForwardedUnixHandler(logger)
|
||||
unixForwardHandler := &forwardedUnixHandler{log: logger}
|
||||
|
||||
metrics := newSSHServerMetrics(prometheusRegistry)
|
||||
s := &Server{
|
||||
@@ -115,11 +109,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
|
||||
srv := &ssh.Server{
|
||||
ChannelHandlers: map[string]ssh.ChannelHandler{
|
||||
"direct-tcpip": func(srv *ssh.Server, conn *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) {
|
||||
// Wrapper is designed to find and track JetBrains Gateway connections.
|
||||
wrapped := NewJetbrainsChannelWatcher(ctx, s.logger, newChan, &s.connCountJetBrains)
|
||||
ssh.DirectTCPIPHandler(srv, conn, wrapped, ctx)
|
||||
},
|
||||
"direct-tcpip": ssh.DirectTCPIPHandler,
|
||||
"direct-streamlocal@openssh.com": directStreamLocalHandler,
|
||||
"session": ssh.DefaultSessionHandler,
|
||||
},
|
||||
@@ -150,7 +140,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
},
|
||||
ReversePortForwardingCallback: func(ctx ssh.Context, bindHost string, bindPort uint32) bool {
|
||||
// Allow reverse port forwarding all!
|
||||
s.logger.Debug(ctx, "reverse port forward",
|
||||
s.logger.Debug(ctx, "local port forward",
|
||||
slog.F("bind_host", bindHost),
|
||||
slog.F("bind_port", bindPort))
|
||||
return true
|
||||
@@ -201,16 +191,9 @@ func (s *Server) ConnStats() ConnStats {
|
||||
}
|
||||
|
||||
func (s *Server) sessionHandler(session ssh.Session) {
|
||||
logger := s.logger.With(slog.F("remote_addr", session.RemoteAddr()), slog.F("local_addr", session.LocalAddr()))
|
||||
logger.Info(session.Context(), "handling ssh session")
|
||||
ctx := session.Context()
|
||||
logger := s.logger.With(
|
||||
slog.F("remote_addr", session.RemoteAddr()),
|
||||
slog.F("local_addr", session.LocalAddr()),
|
||||
// Assigning a random uuid for each session is useful for tracking
|
||||
// logs for the same ssh session.
|
||||
slog.F("id", uuid.NewString()),
|
||||
)
|
||||
logger.Info(ctx, "handling ssh session")
|
||||
|
||||
if !s.trackSession(session, true) {
|
||||
// See (*Server).Close() for why we call Close instead of Exit.
|
||||
_ = session.Close()
|
||||
@@ -234,7 +217,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
switch ss := session.Subsystem(); ss {
|
||||
case "":
|
||||
case "sftp":
|
||||
s.sftpHandler(logger, session)
|
||||
s.sftpHandler(session)
|
||||
return
|
||||
default:
|
||||
logger.Warn(ctx, "unsupported subsystem", slog.F("subsystem", ss))
|
||||
@@ -242,32 +225,11 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
return
|
||||
}
|
||||
|
||||
err := s.sessionStart(logger, session, extraEnv)
|
||||
err := s.sessionStart(session, extraEnv)
|
||||
var exitError *exec.ExitError
|
||||
if xerrors.As(err, &exitError) {
|
||||
code := exitError.ExitCode()
|
||||
if code == -1 {
|
||||
// If we return -1 here, it will be transmitted as an
|
||||
// uint32(4294967295). This exit code is nonsense, so
|
||||
// instead we return 255 (same as OpenSSH). This is
|
||||
// also the same exit code that the shell returns for
|
||||
// -1.
|
||||
//
|
||||
// For signals, we could consider sending 128+signal
|
||||
// instead (however, OpenSSH doesn't seem to do this).
|
||||
code = 255
|
||||
}
|
||||
logger.Info(ctx, "ssh session returned",
|
||||
slog.Error(exitError),
|
||||
slog.F("process_exit_code", exitError.ExitCode()),
|
||||
slog.F("exit_code", code),
|
||||
)
|
||||
|
||||
// TODO(mafredri): For signal exit, there's also an "exit-signal"
|
||||
// request (session.Exit sends "exit-status"), however, since it's
|
||||
// not implemented on the session interface and not used by
|
||||
// OpenSSH, we'll leave it for now.
|
||||
_ = session.Exit(code)
|
||||
logger.Info(ctx, "ssh session returned", slog.Error(exitError))
|
||||
_ = session.Exit(exitError.ExitCode())
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
@@ -281,7 +243,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
_ = session.Exit(0)
|
||||
}
|
||||
|
||||
func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, extraEnv []string) (retErr error) {
|
||||
func (s *Server) sessionStart(session ssh.Session, extraEnv []string) (retErr error) {
|
||||
ctx := session.Context()
|
||||
env := append(session.Environ(), extraEnv...)
|
||||
var magicType string
|
||||
@@ -289,23 +251,21 @@ func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, extraEnv
|
||||
if !strings.HasPrefix(kv, MagicSessionTypeEnvironmentVariable) {
|
||||
continue
|
||||
}
|
||||
magicType = strings.ToLower(strings.TrimPrefix(kv, MagicSessionTypeEnvironmentVariable+"="))
|
||||
magicType = strings.TrimPrefix(kv, MagicSessionTypeEnvironmentVariable+"=")
|
||||
env = append(env[:index], env[index+1:]...)
|
||||
}
|
||||
|
||||
// Always force lowercase checking to be case-insensitive.
|
||||
switch magicType {
|
||||
case MagicSessionTypeVSCode:
|
||||
s.connCountVSCode.Add(1)
|
||||
defer s.connCountVSCode.Add(-1)
|
||||
case MagicSessionTypeJetBrains:
|
||||
// Do nothing here because JetBrains launches hundreds of ssh sessions.
|
||||
// We instead track JetBrains in the single persistent tcp forwarding channel.
|
||||
s.connCountJetBrains.Add(1)
|
||||
defer s.connCountJetBrains.Add(-1)
|
||||
case "":
|
||||
s.connCountSSHSession.Add(1)
|
||||
defer s.connCountSSHSession.Add(-1)
|
||||
default:
|
||||
logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("type", magicType))
|
||||
s.logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("type", magicType))
|
||||
}
|
||||
|
||||
magicTypeLabel := magicTypeMetricLabel(magicType)
|
||||
@@ -338,12 +298,12 @@ func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, extraEnv
|
||||
}
|
||||
|
||||
if isPty {
|
||||
return s.startPTYSession(logger, session, magicTypeLabel, cmd, sshPty, windowSize)
|
||||
return s.startPTYSession(session, magicTypeLabel, cmd, sshPty, windowSize)
|
||||
}
|
||||
return s.startNonPTYSession(logger, session, magicTypeLabel, cmd.AsExec())
|
||||
return s.startNonPTYSession(session, magicTypeLabel, cmd.AsExec())
|
||||
}
|
||||
|
||||
func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, magicTypeLabel string, cmd *exec.Cmd) error {
|
||||
func (s *Server) startNonPTYSession(session ssh.Session, magicTypeLabel string, cmd *exec.Cmd) error {
|
||||
s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "no").Add(1)
|
||||
|
||||
cmd.Stdout = session
|
||||
@@ -367,17 +327,6 @@ func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, mag
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "start_command").Add(1)
|
||||
return xerrors.Errorf("start: %w", err)
|
||||
}
|
||||
sigs := make(chan ssh.Signal, 1)
|
||||
session.Signals(sigs)
|
||||
defer func() {
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
go func() {
|
||||
for sig := range sigs {
|
||||
s.handleSignal(logger, sig, cmd.Process, magicTypeLabel)
|
||||
}
|
||||
}()
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
@@ -388,10 +337,9 @@ type ptySession interface {
|
||||
Context() ssh.Context
|
||||
DisablePTYEmulation()
|
||||
RawCommand() string
|
||||
Signals(chan<- ssh.Signal)
|
||||
}
|
||||
|
||||
func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTypeLabel string, cmd *pty.Cmd, sshPty ssh.Pty, windowSize <-chan ssh.Window) (retErr error) {
|
||||
func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd *pty.Cmd, sshPty ssh.Pty, windowSize <-chan ssh.Window) (retErr error) {
|
||||
s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "yes").Add(1)
|
||||
|
||||
ctx := session.Context()
|
||||
@@ -404,7 +352,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
if serviceBanner != nil {
|
||||
err := showServiceBanner(session, serviceBanner)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "agent failed to show service banner", slog.Error(err))
|
||||
s.logger.Error(ctx, "agent failed to show service banner", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "service_banner").Add(1)
|
||||
}
|
||||
}
|
||||
@@ -415,11 +363,11 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
if manifest != nil {
|
||||
err := showMOTD(s.fs, session, manifest.MOTDFile)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "agent failed to show MOTD", slog.Error(err))
|
||||
s.logger.Error(ctx, "agent failed to show MOTD", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "motd").Add(1)
|
||||
}
|
||||
} else {
|
||||
logger.Warn(ctx, "metadata lookup failed, unable to show MOTD")
|
||||
s.logger.Warn(ctx, "metadata lookup failed, unable to show MOTD")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -428,7 +376,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
// The pty package sets `SSH_TTY` on supported platforms.
|
||||
ptty, process, err := pty.Start(cmd, pty.WithPTYOption(
|
||||
pty.WithSSHRequest(sshPty),
|
||||
pty.WithLogger(slog.Stdlib(ctx, logger, slog.LevelInfo)),
|
||||
pty.WithLogger(slog.Stdlib(ctx, s.logger, slog.LevelInfo)),
|
||||
))
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "start_command").Add(1)
|
||||
@@ -437,43 +385,20 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
defer func() {
|
||||
closeErr := ptty.Close()
|
||||
if closeErr != nil {
|
||||
logger.Warn(ctx, "failed to close tty", slog.Error(closeErr))
|
||||
s.logger.Warn(ctx, "failed to close tty", slog.Error(closeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "close").Add(1)
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
sigs := make(chan ssh.Signal, 1)
|
||||
session.Signals(sigs)
|
||||
defer func() {
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
if sigs == nil && windowSize == nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case sig, ok := <-sigs:
|
||||
if !ok {
|
||||
sigs = nil
|
||||
continue
|
||||
}
|
||||
s.handleSignal(logger, sig, process, magicTypeLabel)
|
||||
case win, ok := <-windowSize:
|
||||
if !ok {
|
||||
windowSize = nil
|
||||
continue
|
||||
}
|
||||
resizeErr := ptty.Resize(uint16(win.Height), uint16(win.Width))
|
||||
// If the pty is closed, then command has exited, no need to log.
|
||||
if resizeErr != nil && !errors.Is(resizeErr, pty.ErrClosed) {
|
||||
logger.Warn(ctx, "failed to resize tty", slog.Error(resizeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "resize").Add(1)
|
||||
}
|
||||
for win := range windowSize {
|
||||
resizeErr := ptty.Resize(uint16(win.Height), uint16(win.Width))
|
||||
// If the pty is closed, then command has exited, no need to log.
|
||||
if resizeErr != nil && !errors.Is(resizeErr, pty.ErrClosed) {
|
||||
s.logger.Warn(ctx, "failed to resize tty", slog.Error(resizeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "resize").Add(1)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -494,7 +419,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
// 2. The client hangs up, which cancels the command's Context, and go will
|
||||
// kill the command's process. This then has the same effect as (1).
|
||||
n, err := io.Copy(session, ptty.OutputReader())
|
||||
logger.Debug(ctx, "copy output done", slog.F("bytes", n), slog.Error(err))
|
||||
s.logger.Debug(ctx, "copy output done", slog.F("bytes", n), slog.Error(err))
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "output_io_copy").Add(1)
|
||||
return xerrors.Errorf("copy error: %w", err)
|
||||
@@ -507,7 +432,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
// ExitErrors just mean the command we run returned a non-zero exit code, which is normal
|
||||
// and not something to be concerned about. But, if it's something else, we should log it.
|
||||
if err != nil && !xerrors.As(err, &exitErr) {
|
||||
logger.Warn(ctx, "process wait exited with error", slog.Error(err))
|
||||
s.logger.Warn(ctx, "process wait exited with error", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "wait").Add(1)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -516,19 +441,7 @@ func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTy
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) handleSignal(logger slog.Logger, ssig ssh.Signal, signaler interface{ Signal(os.Signal) error }, magicTypeLabel string) {
|
||||
ctx := context.Background()
|
||||
sig := osSignalFrom(ssig)
|
||||
logger = logger.With(slog.F("ssh_signal", ssig), slog.F("signal", sig.String()))
|
||||
logger.Info(ctx, "received signal from client")
|
||||
err := signaler.Signal(sig)
|
||||
if err != nil {
|
||||
logger.Warn(ctx, "signaling the process failed", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "signal").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
|
||||
func (s *Server) sftpHandler(session ssh.Session) {
|
||||
s.metrics.sftpConnectionsTotal.Add(1)
|
||||
|
||||
ctx := session.Context()
|
||||
@@ -544,14 +457,14 @@ func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
|
||||
// directory so that SFTP connections land there.
|
||||
homedir, err := userHomeDir()
|
||||
if err != nil {
|
||||
logger.Warn(ctx, "get sftp working directory failed, unable to get home dir", slog.Error(err))
|
||||
s.logger.Warn(ctx, "get sftp working directory failed, unable to get home dir", slog.Error(err))
|
||||
} else {
|
||||
opts = append(opts, sftp.WithServerWorkingDirectory(homedir))
|
||||
}
|
||||
|
||||
server, err := sftp.NewServer(session, opts...)
|
||||
if err != nil {
|
||||
logger.Debug(ctx, "initialize sftp server", slog.Error(err))
|
||||
s.logger.Debug(ctx, "initialize sftp server", slog.Error(err))
|
||||
return
|
||||
}
|
||||
defer server.Close()
|
||||
@@ -569,7 +482,7 @@ func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
|
||||
_ = session.Exit(0)
|
||||
return
|
||||
}
|
||||
logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
|
||||
s.logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
|
||||
s.metrics.sftpServerErrors.Add(1)
|
||||
_ = session.Exit(1)
|
||||
}
|
||||
@@ -600,32 +513,8 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string)
|
||||
if runtime.GOOS == "windows" {
|
||||
caller = "/c"
|
||||
}
|
||||
name := shell
|
||||
args := []string{caller, script}
|
||||
|
||||
// A preceding space is generally not idiomatic for a shebang,
|
||||
// but in Terraform it's quite standard to use <<EOF for a multi-line
|
||||
// string which would indent with spaces, so we accept it for user-ease.
|
||||
if strings.HasPrefix(strings.TrimSpace(script), "#!") {
|
||||
// If the script starts with a shebang, we should
|
||||
// execute it directly. This is useful for running
|
||||
// scripts that aren't executable.
|
||||
shebang := strings.SplitN(strings.TrimSpace(script), "\n", 2)[0]
|
||||
shebang = strings.TrimSpace(shebang)
|
||||
shebang = strings.TrimPrefix(shebang, "#!")
|
||||
words, err := shellquote.Split(shebang)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("split shebang: %w", err)
|
||||
}
|
||||
name = words[0]
|
||||
if len(words) > 1 {
|
||||
args = words[1:]
|
||||
} else {
|
||||
args = []string{}
|
||||
}
|
||||
args = append(args, caller, script)
|
||||
}
|
||||
|
||||
// gliderlabs/ssh returns a command slice of zero
|
||||
// when a shell is requested.
|
||||
if len(script) == 0 {
|
||||
@@ -637,7 +526,7 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string)
|
||||
}
|
||||
}
|
||||
|
||||
cmd := pty.CommandContext(ctx, name, args...)
|
||||
cmd := pty.CommandContext(ctx, shell, args...)
|
||||
cmd.Dir = manifest.Directory
|
||||
|
||||
// If the metadata directory doesn't exist, we run the command
|
||||
@@ -659,8 +548,6 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string)
|
||||
// Set environment variables reliable detection of being inside a
|
||||
// Coder workspace.
|
||||
cmd.Env = append(cmd.Env, "CODER=true")
|
||||
cmd.Env = append(cmd.Env, "CODER_WORKSPACE_NAME="+manifest.WorkspaceName)
|
||||
cmd.Env = append(cmd.Env, "CODER_WORKSPACE_AGENT_NAME="+manifest.AgentName)
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("USER=%s", username))
|
||||
// Git on Windows resolves with UNIX-style paths.
|
||||
// If using backslashes, it's unable to find the executable.
|
||||
@@ -681,11 +568,7 @@ func (s *Server) CreateCommand(ctx context.Context, script string, env []string)
|
||||
|
||||
// This adds the ports dialog to code-server that enables
|
||||
// proxying a port dynamically.
|
||||
// If this is empty string, do not set anything. Code-server auto defaults
|
||||
// using its basepath to construct a path based port proxy.
|
||||
if manifest.VSCodePortProxyURI != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("VSCODE_PROXY_URI=%s", manifest.VSCodePortProxyURI))
|
||||
}
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("VSCODE_PROXY_URI=%s", manifest.VSCodePortProxyURI))
|
||||
|
||||
// Hide Coder message on code-server's "Getting Started" page
|
||||
cmd.Env = append(cmd.Env, "CS_DISABLE_GETTING_STARTED_OVERRIDE=true")
|
||||
|
||||
@@ -15,8 +15,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/pty"
|
||||
"github.com/coder/coder/testutil"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
)
|
||||
@@ -63,7 +63,7 @@ func Test_sessionStart_orphan(t *testing.T) {
|
||||
// we don't really care what the error is here. In the larger scenario,
|
||||
// the client has disconnected, so we can't return any error information
|
||||
// to them.
|
||||
_ = s.startPTYSession(logger, sess, "ssh", cmd, ptyInfo, windowSize)
|
||||
_ = s.startPTYSession(sess, "ssh", cmd, ptyInfo, windowSize)
|
||||
}()
|
||||
|
||||
readDone := make(chan struct{})
|
||||
@@ -114,11 +114,6 @@ type testSSHContext struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
var (
|
||||
_ gliderssh.Context = testSSHContext{}
|
||||
_ ptySession = &testSession{}
|
||||
)
|
||||
|
||||
func newTestSession(ctx context.Context) (toClient *io.PipeReader, fromClient *io.PipeWriter, s ptySession) {
|
||||
toClient, fromPty := io.Pipe()
|
||||
toPty, fromClient := io.Pipe()
|
||||
@@ -149,10 +144,6 @@ func (s *testSession) Write(p []byte) (n int, err error) {
|
||||
return s.fromPty.Write(p)
|
||||
}
|
||||
|
||||
func (*testSession) Signals(_ chan<- gliderssh.Signal) {
|
||||
// Not implemented, but will be called.
|
||||
}
|
||||
|
||||
func (testSSHContext) Lock() {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
@@ -3,12 +3,9 @@
|
||||
package agentssh_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -23,10 +20,9 @@ import (
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/agent/agentssh"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -60,8 +56,8 @@ func TestNewServer_ServeClient(t *testing.T) {
|
||||
|
||||
var b bytes.Buffer
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
sess.Stdout = &b
|
||||
require.NoError(t, err)
|
||||
err = sess.Start("echo hello")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -75,42 +71,6 @@ func TestNewServer_ServeClient(t *testing.T) {
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestNewServer_ExecuteShebang(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("bash doesn't exist on Windows")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = s.Close()
|
||||
})
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
t.Run("Basic", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
cmd, err := s.CreateCommand(ctx, `#!/bin/bash
|
||||
echo test`, nil)
|
||||
require.NoError(t, err)
|
||||
output, err := cmd.AsExec().CombinedOutput()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "test\n", string(output))
|
||||
})
|
||||
t.Run("Args", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
cmd, err := s.CreateCommand(ctx, `#!/usr/bin/env bash
|
||||
echo test`, nil)
|
||||
require.NoError(t, err)
|
||||
output, err := cmd.AsExec().CombinedOutput()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "test\n", string(output))
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -142,7 +102,6 @@ func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
defer wg.Done()
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
sess, err := c.NewSession()
|
||||
assert.NoError(t, err)
|
||||
sess.Stdin = pty.Input()
|
||||
sess.Stdout = pty.Output()
|
||||
sess.Stderr = pty.Output()
|
||||
@@ -163,159 +122,6 @@ func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestNewServer_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Stdout", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
defer func() {
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
<-done
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
r, err := sess.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Perform multiple sleeps since the interrupt signal doesn't propagate to
|
||||
// the process group, this lets us exit early.
|
||||
sleeps := strings.Repeat("sleep 1 && ", int(testutil.WaitMedium.Seconds()))
|
||||
err = sess.Start(fmt.Sprintf("echo hello && %s echo bye", sleeps))
|
||||
require.NoError(t, err)
|
||||
|
||||
sc := bufio.NewScanner(r)
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
if strings.Contains(sc.Text(), "hello") {
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Signal(ssh.SIGKILL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assumption, signal propagates and the command exists, closing stdout.
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
require.NotContains(t, sc.Text(), "bye")
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Wait()
|
||||
exitErr := &ssh.ExitError{}
|
||||
require.ErrorAs(t, err, &exitErr)
|
||||
wantCode := 255
|
||||
if runtime.GOOS == "windows" {
|
||||
wantCode = 1
|
||||
}
|
||||
require.Equal(t, wantCode, exitErr.ExitStatus())
|
||||
})
|
||||
t.Run("PTY", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
defer func() {
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
<-done
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
pty := ptytest.New(t)
|
||||
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
r, err := sess.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Note, we request pty but don't use ptytest here because we can't
|
||||
// easily test for no text before EOF.
|
||||
sess.Stdin = pty.Input()
|
||||
sess.Stderr = pty.Output()
|
||||
|
||||
err = sess.RequestPty("xterm", 80, 80, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Perform multiple sleeps since the interrupt signal doesn't propagate to
|
||||
// the process group, this lets us exit early.
|
||||
sleeps := strings.Repeat("sleep 1 && ", int(testutil.WaitMedium.Seconds()))
|
||||
err = sess.Start(fmt.Sprintf("echo hello && %s echo bye", sleeps))
|
||||
require.NoError(t, err)
|
||||
|
||||
sc := bufio.NewScanner(r)
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
if strings.Contains(sc.Text(), "hello") {
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Signal(ssh.SIGKILL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assumption, signal propagates and the command exists, closing stdout.
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
require.NotContains(t, sc.Text(), "bye")
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Wait()
|
||||
exitErr := &ssh.ExitError{}
|
||||
require.ErrorAs(t, err, &exitErr)
|
||||
wantCode := 255
|
||||
if runtime.GOOS == "windows" {
|
||||
wantCode = 1
|
||||
}
|
||||
require.Equal(t, wantCode, exitErr.ExitStatus())
|
||||
})
|
||||
}
|
||||
|
||||
func sshClient(t *testing.T, addr string) *ssh.Client {
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
+35
-84
@@ -2,14 +2,11 @@ package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
@@ -36,88 +33,61 @@ type forwardedStreamLocalPayload struct {
|
||||
type forwardedUnixHandler struct {
|
||||
sync.Mutex
|
||||
log slog.Logger
|
||||
forwards map[forwardKey]net.Listener
|
||||
}
|
||||
|
||||
type forwardKey struct {
|
||||
sessionID string
|
||||
addr string
|
||||
}
|
||||
|
||||
func newForwardedUnixHandler(log slog.Logger) *forwardedUnixHandler {
|
||||
return &forwardedUnixHandler{
|
||||
log: log,
|
||||
forwards: make(map[forwardKey]net.Listener),
|
||||
}
|
||||
forwards map[string]net.Listener
|
||||
}
|
||||
|
||||
func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server, req *gossh.Request) (bool, []byte) {
|
||||
h.log.Debug(ctx, "handling SSH unix forward")
|
||||
h.Lock()
|
||||
if h.forwards == nil {
|
||||
h.forwards = make(map[string]net.Listener)
|
||||
}
|
||||
h.Unlock()
|
||||
conn, ok := ctx.Value(ssh.ContextKeyConn).(*gossh.ServerConn)
|
||||
if !ok {
|
||||
h.log.Warn(ctx, "SSH unix forward request from client with no gossh connection")
|
||||
return false, nil
|
||||
}
|
||||
log := h.log.With(slog.F("session_id", ctx.SessionID()), slog.F("remote_addr", conn.RemoteAddr()))
|
||||
|
||||
switch req.Type {
|
||||
case "streamlocal-forward@openssh.com":
|
||||
var reqPayload streamLocalForwardPayload
|
||||
err := gossh.Unmarshal(req.Payload, &reqPayload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "parse streamlocal-forward@openssh.com request (SSH unix forward) payload from client", slog.Error(err))
|
||||
h.log.Warn(ctx, "parse streamlocal-forward@openssh.com request payload from client", slog.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
addr := reqPayload.SocketPath
|
||||
log = log.With(slog.F("socket_path", addr))
|
||||
log.Debug(ctx, "request begin SSH unix forward")
|
||||
|
||||
key := forwardKey{
|
||||
sessionID: ctx.SessionID(),
|
||||
addr: addr,
|
||||
}
|
||||
|
||||
h.Lock()
|
||||
_, ok := h.forwards[key]
|
||||
_, ok := h.forwards[addr]
|
||||
h.Unlock()
|
||||
if ok {
|
||||
// In cases where `ExitOnForwardFailure=yes` is set, returning false
|
||||
// here will cause the connection to be closed. To avoid this, and
|
||||
// to match OpenSSH behavior, we silently ignore the second forward
|
||||
// request.
|
||||
log.Warn(ctx, "SSH unix forward request for socket path that is already being forwarded on this session, ignoring")
|
||||
return true, nil
|
||||
h.log.Warn(ctx, "SSH unix forward request for socket path that is already being forwarded (maybe to another client?)",
|
||||
slog.F("socket_path", addr),
|
||||
)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Create socket parent dir if not exists.
|
||||
parentDir := filepath.Dir(addr)
|
||||
err = os.MkdirAll(parentDir, 0o700)
|
||||
if err != nil {
|
||||
log.Warn(ctx, "create parent dir for SSH unix forward request",
|
||||
h.log.Warn(ctx, "create parent dir for SSH unix forward request",
|
||||
slog.F("parent_dir", parentDir),
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Remove existing socket if it exists. We do not use os.Remove() here
|
||||
// so that directories are kept. Note that it's possible that we will
|
||||
// overwrite a regular file here. Both of these behaviors match OpenSSH,
|
||||
// however, which is why we unlink.
|
||||
err = unlink(addr)
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
log.Warn(ctx, "remove existing socket for SSH unix forward request", slog.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
lc := &net.ListenConfig{}
|
||||
ln, err := lc.Listen(ctx, "unix", addr)
|
||||
ln, err := net.Listen("unix", addr)
|
||||
if err != nil {
|
||||
log.Warn(ctx, "listen on Unix socket for SSH unix forward request", slog.Error(err))
|
||||
h.log.Warn(ctx, "listen on Unix socket for SSH unix forward request",
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
return false, nil
|
||||
}
|
||||
log.Debug(ctx, "SSH unix forward listening on socket")
|
||||
|
||||
// The listener needs to successfully start before it can be added to
|
||||
// the map, so we don't have to worry about checking for an existing
|
||||
@@ -125,9 +95,8 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
//
|
||||
// This is also what the upstream TCP version of this code does.
|
||||
h.Lock()
|
||||
h.forwards[key] = ln
|
||||
h.forwards[addr] = ln
|
||||
h.Unlock()
|
||||
log.Debug(ctx, "SSH unix forward added to cache")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go func() {
|
||||
@@ -141,13 +110,14 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
if !xerrors.Is(err, net.ErrClosed) {
|
||||
log.Warn(ctx, "accept on local Unix socket for SSH unix forward request", slog.Error(err))
|
||||
h.log.Warn(ctx, "accept on local Unix socket for SSH unix forward request",
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
}
|
||||
// closed below
|
||||
log.Debug(ctx, "SSH unix forward listener closed")
|
||||
break
|
||||
}
|
||||
log.Debug(ctx, "accepted SSH unix forward connection")
|
||||
payload := gossh.Marshal(&forwardedStreamLocalPayload{
|
||||
SocketPath: addr,
|
||||
})
|
||||
@@ -155,7 +125,10 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
go func() {
|
||||
ch, reqs, err := conn.OpenChannel("forwarded-streamlocal@openssh.com", payload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "open SSH unix forward channel to client", slog.Error(err))
|
||||
h.log.Warn(ctx, "open SSH channel to forward Unix connection to client",
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
_ = c.Close()
|
||||
return
|
||||
}
|
||||
@@ -165,11 +138,11 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
}
|
||||
|
||||
h.Lock()
|
||||
if ln2, ok := h.forwards[key]; ok && ln2 == ln {
|
||||
delete(h.forwards, key)
|
||||
ln2, ok := h.forwards[addr]
|
||||
if ok && ln2 == ln {
|
||||
delete(h.forwards, addr)
|
||||
}
|
||||
h.Unlock()
|
||||
log.Debug(ctx, "SSH unix forward listener removed from cache")
|
||||
_ = ln.Close()
|
||||
}()
|
||||
|
||||
@@ -179,25 +152,15 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
var reqPayload streamLocalForwardPayload
|
||||
err := gossh.Unmarshal(req.Payload, &reqPayload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "parse cancel-streamlocal-forward@openssh.com (SSH unix forward) request payload from client", slog.Error(err))
|
||||
h.log.Warn(ctx, "parse cancel-streamlocal-forward@openssh.com request payload from client", slog.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
log.Debug(ctx, "request to cancel SSH unix forward", slog.F("socket_path", reqPayload.SocketPath))
|
||||
|
||||
key := forwardKey{
|
||||
sessionID: ctx.SessionID(),
|
||||
addr: reqPayload.SocketPath,
|
||||
}
|
||||
|
||||
h.Lock()
|
||||
ln, ok := h.forwards[key]
|
||||
delete(h.forwards, key)
|
||||
ln, ok := h.forwards[reqPayload.SocketPath]
|
||||
h.Unlock()
|
||||
if !ok {
|
||||
log.Warn(ctx, "SSH unix forward not found in cache")
|
||||
return true, nil
|
||||
if ok {
|
||||
_ = ln.Close()
|
||||
}
|
||||
_ = ln.Close()
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
@@ -238,15 +201,3 @@ func directStreamLocalHandler(_ *ssh.Server, _ *gossh.ServerConn, newChan gossh.
|
||||
|
||||
Bicopy(ctx, ch, dconn)
|
||||
}
|
||||
|
||||
// unlink removes files and unlike os.Remove, directories are kept.
|
||||
func unlink(path string) error {
|
||||
// Ignore EINTR like os.Remove, see ignoringEINTR in os/file_posix.go
|
||||
// for more details.
|
||||
for {
|
||||
err := syscall.Unlink(path)
|
||||
if !errors.Is(err, syscall.EINTR) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"go.uber.org/atomic"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
// localForwardChannelData is copied from the ssh package.
|
||||
type localForwardChannelData struct {
|
||||
DestAddr string
|
||||
DestPort uint32
|
||||
|
||||
OriginAddr string
|
||||
OriginPort uint32
|
||||
}
|
||||
|
||||
// JetbrainsChannelWatcher is used to track JetBrains port forwarded (Gateway)
|
||||
// channels. If the port forward is something other than JetBrains, this struct
|
||||
// is a noop.
|
||||
type JetbrainsChannelWatcher struct {
|
||||
gossh.NewChannel
|
||||
jetbrainsCounter *atomic.Int64
|
||||
logger slog.Logger
|
||||
}
|
||||
|
||||
func NewJetbrainsChannelWatcher(ctx ssh.Context, logger slog.Logger, newChannel gossh.NewChannel, counter *atomic.Int64) gossh.NewChannel {
|
||||
d := localForwardChannelData{}
|
||||
if err := gossh.Unmarshal(newChannel.ExtraData(), &d); err != nil {
|
||||
// If the data fails to unmarshal, do nothing.
|
||||
logger.Warn(ctx, "failed to unmarshal port forward data", slog.Error(err))
|
||||
return newChannel
|
||||
}
|
||||
|
||||
// If we do get a port, we should be able to get the matching PID and from
|
||||
// there look up the invocation.
|
||||
cmdline, err := getListeningPortProcessCmdline(d.DestPort)
|
||||
if err != nil {
|
||||
logger.Warn(ctx, "failed to inspect port",
|
||||
slog.F("destination_port", d.DestPort),
|
||||
slog.Error(err))
|
||||
return newChannel
|
||||
}
|
||||
|
||||
// If this is not JetBrains, then we do not need to do anything special. We
|
||||
// attempt to match on something that appears unique to JetBrains software.
|
||||
if !strings.Contains(strings.ToLower(cmdline), strings.ToLower(MagicProcessCmdlineJetBrains)) {
|
||||
return newChannel
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "discovered forwarded JetBrains process",
|
||||
slog.F("destination_port", d.DestPort))
|
||||
|
||||
return &JetbrainsChannelWatcher{
|
||||
NewChannel: newChannel,
|
||||
jetbrainsCounter: counter,
|
||||
logger: logger.With(slog.F("destination_port", d.DestPort)),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *JetbrainsChannelWatcher) Accept() (gossh.Channel, <-chan *gossh.Request, error) {
|
||||
c, r, err := w.NewChannel.Accept()
|
||||
if err != nil {
|
||||
return c, r, err
|
||||
}
|
||||
w.jetbrainsCounter.Add(1)
|
||||
// nolint: gocritic // JetBrains is a proper noun and should be capitalized
|
||||
w.logger.Debug(context.Background(), "JetBrains watcher accepted channel")
|
||||
|
||||
return &ChannelOnClose{
|
||||
Channel: c,
|
||||
done: func() {
|
||||
w.jetbrainsCounter.Add(-1)
|
||||
// nolint: gocritic // JetBrains is a proper noun and should be capitalized
|
||||
w.logger.Debug(context.Background(), "JetBrains watcher channel closed")
|
||||
},
|
||||
}, r, err
|
||||
}
|
||||
|
||||
type ChannelOnClose struct {
|
||||
gossh.Channel
|
||||
// once ensures close only decrements the counter once.
|
||||
// Because close can be called multiple times.
|
||||
once sync.Once
|
||||
done func()
|
||||
}
|
||||
|
||||
func (c *ChannelOnClose) Close() error {
|
||||
c.once.Do(c.done)
|
||||
return c.Channel.Close()
|
||||
}
|
||||
@@ -1,8 +1,6 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
@@ -80,6 +78,5 @@ func magicTypeMetricLabel(magicType string) string {
|
||||
default:
|
||||
magicType = "unknown"
|
||||
}
|
||||
// Always be case insensitive
|
||||
return strings.ToLower(magicType)
|
||||
return magicType
|
||||
}
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
//go:build linux
|
||||
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cakturk/go-netstat/netstat"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func getListeningPortProcessCmdline(port uint32) (string, error) {
|
||||
acceptFn := func(s *netstat.SockTabEntry) bool {
|
||||
return s.LocalAddr != nil && uint32(s.LocalAddr.Port) == port
|
||||
}
|
||||
tabs4, err4 := netstat.TCPSocks(acceptFn)
|
||||
tabs6, err6 := netstat.TCP6Socks(acceptFn)
|
||||
|
||||
// In the common case, we want to check ipv4 listening addresses. If this
|
||||
// fails, we should return an error. We also need to check ipv6. The
|
||||
// assumption is, if we have an err4, and 0 ipv6 addresses listed, then we are
|
||||
// interested in the err4 (and vice versa). So return both errors (at least 1
|
||||
// is non-nil) if the other list is empty.
|
||||
if (err4 != nil && len(tabs6) == 0) || (err6 != nil && len(tabs4) == 0) {
|
||||
return "", xerrors.Errorf("inspect port %d: %w", port, errors.Join(err4, err6))
|
||||
}
|
||||
|
||||
var proc *netstat.Process
|
||||
if len(tabs4) > 0 {
|
||||
proc = tabs4[0].Process
|
||||
} else if len(tabs6) > 0 {
|
||||
proc = tabs6[0].Process
|
||||
}
|
||||
if proc == nil {
|
||||
// Either nothing is listening on this port or we were unable to read the
|
||||
// process details (permission issues reading /proc/$pid/* potentially).
|
||||
// Or, perhaps /proc/net/tcp{,6} is not listing the port for some reason.
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// The process name provided by go-netstat does not include the full command
|
||||
// line so grab that instead.
|
||||
pid := proc.Pid
|
||||
data, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid))
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("read /proc/%d/cmdline: %w", pid, err)
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
//go:build !linux
|
||||
|
||||
package agentssh
|
||||
|
||||
func getListeningPortProcessCmdline(uint32) (string, error) {
|
||||
// We are not worrying about other platforms at the moment because Gateway
|
||||
// only supports Linux anyway.
|
||||
return "", nil
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func osSignalFrom(sig ssh.Signal) os.Signal {
|
||||
switch sig {
|
||||
case ssh.SIGABRT:
|
||||
return unix.SIGABRT
|
||||
case ssh.SIGALRM:
|
||||
return unix.SIGALRM
|
||||
case ssh.SIGFPE:
|
||||
return unix.SIGFPE
|
||||
case ssh.SIGHUP:
|
||||
return unix.SIGHUP
|
||||
case ssh.SIGILL:
|
||||
return unix.SIGILL
|
||||
case ssh.SIGINT:
|
||||
return unix.SIGINT
|
||||
case ssh.SIGKILL:
|
||||
return unix.SIGKILL
|
||||
case ssh.SIGPIPE:
|
||||
return unix.SIGPIPE
|
||||
case ssh.SIGQUIT:
|
||||
return unix.SIGQUIT
|
||||
case ssh.SIGSEGV:
|
||||
return unix.SIGSEGV
|
||||
case ssh.SIGTERM:
|
||||
return unix.SIGTERM
|
||||
case ssh.SIGUSR1:
|
||||
return unix.SIGUSR1
|
||||
case ssh.SIGUSR2:
|
||||
return unix.SIGUSR2
|
||||
|
||||
// Unhandled, use sane fallback.
|
||||
default:
|
||||
return unix.SIGKILL
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
)
|
||||
|
||||
func osSignalFrom(sig ssh.Signal) os.Signal {
|
||||
switch sig {
|
||||
// Signals are not supported on Windows.
|
||||
default:
|
||||
return os.Kill
|
||||
}
|
||||
}
|
||||
+2
-194
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -142,7 +141,7 @@ func addXauthEntry(ctx context.Context, fs afero.Fs, host string, display string
|
||||
}
|
||||
|
||||
// Open or create the Xauthority file
|
||||
file, err := fs.OpenFile(xauthPath, os.O_RDWR|os.O_CREATE, 0o600)
|
||||
file, err := fs.OpenFile(xauthPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o600)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open Xauthority file: %w", err)
|
||||
}
|
||||
@@ -154,105 +153,7 @@ func addXauthEntry(ctx context.Context, fs afero.Fs, host string, display string
|
||||
return xerrors.Errorf("failed to decode auth cookie: %w", err)
|
||||
}
|
||||
|
||||
// Read the Xauthority file and look for an existing entry for the host,
|
||||
// display, and auth protocol. If an entry is found, overwrite the auth
|
||||
// cookie (if it fits). Otherwise, mark the entry for deletion.
|
||||
type deleteEntry struct {
|
||||
start, end int
|
||||
}
|
||||
var deleteEntries []deleteEntry
|
||||
pos := 0
|
||||
updated := false
|
||||
for {
|
||||
entry, err := readXauthEntry(file)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return xerrors.Errorf("failed to read Xauthority entry: %w", err)
|
||||
}
|
||||
|
||||
nextPos := pos + entry.Len()
|
||||
cookieStartPos := nextPos - len(entry.authCookie)
|
||||
|
||||
if entry.family == 0x0100 && entry.address == host && entry.display == display && entry.authProtocol == authProtocol {
|
||||
if !updated && len(entry.authCookie) == len(authCookieBytes) {
|
||||
// Overwrite the auth cookie
|
||||
_, err := file.WriteAt(authCookieBytes, int64(cookieStartPos))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write auth cookie: %w", err)
|
||||
}
|
||||
updated = true
|
||||
} else {
|
||||
// Mark entry for deletion.
|
||||
if len(deleteEntries) > 0 && deleteEntries[len(deleteEntries)-1].end == pos {
|
||||
deleteEntries[len(deleteEntries)-1].end = nextPos
|
||||
} else {
|
||||
deleteEntries = append(deleteEntries, deleteEntry{
|
||||
start: pos,
|
||||
end: nextPos,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pos = nextPos
|
||||
}
|
||||
|
||||
// In case the magic cookie changed, or we've previously bloated the
|
||||
// Xauthority file, we may have to delete entries.
|
||||
if len(deleteEntries) > 0 {
|
||||
// Read the entire file into memory. This is not ideal, but it's the
|
||||
// simplest way to delete entries from the middle of the file. The
|
||||
// Xauthority file is small, so this should be fine.
|
||||
_, err = file.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to seek Xauthority file: %w", err)
|
||||
}
|
||||
data, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read Xauthority file: %w", err)
|
||||
}
|
||||
|
||||
// Delete the entries in reverse order.
|
||||
for i := len(deleteEntries) - 1; i >= 0; i-- {
|
||||
entry := deleteEntries[i]
|
||||
// Safety check: ensure the entry is still there.
|
||||
if entry.start > len(data) || entry.end > len(data) {
|
||||
continue
|
||||
}
|
||||
data = append(data[:entry.start], data[entry.end:]...)
|
||||
}
|
||||
|
||||
// Write the data back to the file.
|
||||
_, err = file.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to seek Xauthority file: %w", err)
|
||||
}
|
||||
_, err = file.Write(data)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write Xauthority file: %w", err)
|
||||
}
|
||||
|
||||
// Truncate the file.
|
||||
err = file.Truncate(int64(len(data)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to truncate Xauthority file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Return if we've already updated the entry.
|
||||
if updated {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure we're at the end (append).
|
||||
_, err = file.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to seek Xauthority file: %w", err)
|
||||
}
|
||||
|
||||
// Append Xauthority entry.
|
||||
// Write Xauthority entry
|
||||
family := uint16(0x0100) // FamilyLocal
|
||||
err = binary.Write(file, binary.BigEndian, family)
|
||||
if err != nil {
|
||||
@@ -297,96 +198,3 @@ func addXauthEntry(ctx context.Context, fs afero.Fs, host string, display string
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// xauthEntry is an representation of an Xauthority entry.
|
||||
//
|
||||
// The Xauthority file format is as follows:
|
||||
//
|
||||
// - 16-bit family
|
||||
// - 16-bit address length
|
||||
// - address
|
||||
// - 16-bit display length
|
||||
// - display
|
||||
// - 16-bit auth protocol length
|
||||
// - auth protocol
|
||||
// - 16-bit auth cookie length
|
||||
// - auth cookie
|
||||
type xauthEntry struct {
|
||||
family uint16
|
||||
address string
|
||||
display string
|
||||
authProtocol string
|
||||
authCookie []byte
|
||||
}
|
||||
|
||||
func (e xauthEntry) Len() int {
|
||||
// 5 * uint16 = 10 bytes for the family/length fields.
|
||||
return 2*5 + len(e.address) + len(e.display) + len(e.authProtocol) + len(e.authCookie)
|
||||
}
|
||||
|
||||
func readXauthEntry(r io.Reader) (xauthEntry, error) {
|
||||
var entry xauthEntry
|
||||
|
||||
// Read family
|
||||
err := binary.Read(r, binary.BigEndian, &entry.family)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read family: %w", err)
|
||||
}
|
||||
|
||||
// Read address
|
||||
var addressLength uint16
|
||||
err = binary.Read(r, binary.BigEndian, &addressLength)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read address length: %w", err)
|
||||
}
|
||||
|
||||
addressBytes := make([]byte, addressLength)
|
||||
_, err = r.Read(addressBytes)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read address: %w", err)
|
||||
}
|
||||
entry.address = string(addressBytes)
|
||||
|
||||
// Read display
|
||||
var displayLength uint16
|
||||
err = binary.Read(r, binary.BigEndian, &displayLength)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read display length: %w", err)
|
||||
}
|
||||
|
||||
displayBytes := make([]byte, displayLength)
|
||||
_, err = r.Read(displayBytes)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read display: %w", err)
|
||||
}
|
||||
entry.display = string(displayBytes)
|
||||
|
||||
// Read auth protocol
|
||||
var authProtocolLength uint16
|
||||
err = binary.Read(r, binary.BigEndian, &authProtocolLength)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read auth protocol length: %w", err)
|
||||
}
|
||||
|
||||
authProtocolBytes := make([]byte, authProtocolLength)
|
||||
_, err = r.Read(authProtocolBytes)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read auth protocol: %w", err)
|
||||
}
|
||||
entry.authProtocol = string(authProtocolBytes)
|
||||
|
||||
// Read auth cookie
|
||||
var authCookieLength uint16
|
||||
err = binary.Read(r, binary.BigEndian, &authCookieLength)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read auth cookie length: %w", err)
|
||||
}
|
||||
|
||||
entry.authCookie = make([]byte, authCookieLength)
|
||||
_, err = r.Read(entry.authCookie)
|
||||
if err != nil {
|
||||
return xauthEntry{}, xerrors.Errorf("failed to read auth cookie: %w", err)
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
@@ -1,254 +0,0 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_addXauthEntry(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type testEntry struct {
|
||||
address string
|
||||
display string
|
||||
authProtocol string
|
||||
authCookie string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
authFile []byte
|
||||
wantAuthFile []byte
|
||||
entries []testEntry
|
||||
}{
|
||||
{
|
||||
name: "add entry",
|
||||
authFile: nil,
|
||||
wantAuthFile: []byte{
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 00
|
||||
//
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0001 00 GIC-COOKIE-1...
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x00,
|
||||
},
|
||||
entries: []testEntry{
|
||||
{
|
||||
address: "w",
|
||||
display: "0",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "00",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "add two entries",
|
||||
authFile: []byte{},
|
||||
wantAuthFile: []byte{
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 00
|
||||
// w/unix:1 MIT-MAGIC-COOKIE-1 11
|
||||
//
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0001 0001 GIC-COOKIE-1....
|
||||
// 00000020: 0000 0177 0001 3100 124d 4954 2d4d 4147 ...w..1..MIT-MAG
|
||||
// 00000030: 4943 2d43 4f4f 4b49 452d 3100 0111 IC-COOKIE-1...
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x00,
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x31,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x11,
|
||||
},
|
||||
entries: []testEntry{
|
||||
{
|
||||
address: "w",
|
||||
display: "0",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "00",
|
||||
},
|
||||
{
|
||||
address: "w",
|
||||
display: "1",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "11",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update entry with new auth cookie length",
|
||||
authFile: []byte{
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 00
|
||||
// w/unix:1 MIT-MAGIC-COOKIE-1 11
|
||||
//
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0001 0001 GIC-COOKIE-1....
|
||||
// 00000020: 0000 0177 0001 3100 124d 4954 2d4d 4147 ...w..1..MIT-MAG
|
||||
// 00000030: 4943 2d43 4f4f 4b49 452d 3100 0111 IC-COOKIE-1...
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x00,
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x31,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x11,
|
||||
},
|
||||
wantAuthFile: []byte{
|
||||
// The order changed, due to new length of auth cookie resulting
|
||||
// in remove + append, we verify that the implementation is
|
||||
// behaving as expected (changing the order is not a requirement,
|
||||
// simply an implementation detail).
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x31,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x11,
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x02, 0xff, 0xff,
|
||||
},
|
||||
entries: []testEntry{
|
||||
{
|
||||
address: "w",
|
||||
display: "0",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "ffff",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update entry",
|
||||
authFile: []byte{
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0001 0001 GIC-COOKIE-1....
|
||||
// 00000020: 0000 0177 0001 3100 124d 4954 2d4d 4147 ...w..1..MIT-MAG
|
||||
// 00000030: 4943 2d43 4f4f 4b49 452d 3100 0111 IC-COOKIE-1...
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x00,
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x31,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x11,
|
||||
},
|
||||
wantAuthFile: []byte{
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0001 0001 GIC-COOKIE-1....
|
||||
// 00000020: 0000 0177 0001 3100 124d 4954 2d4d 4147 ...w..1..MIT-MAG
|
||||
// 00000030: 4943 2d43 4f4f 4b49 452d 3100 0111 IC-COOKIE-1...
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0xff,
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x31,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x01, 0x11,
|
||||
},
|
||||
entries: []testEntry{
|
||||
{
|
||||
address: "w",
|
||||
display: "0",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "ff",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "clean up old entries",
|
||||
authFile: []byte{
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 80507df050756cdefa504b65adb3bcfb
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 267b37f6cbc11b97beb826bb1aab8570
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 516e22e2b11d1bd0115dff09c028ca5c
|
||||
//
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0010 8050 GIC-COOKIE-1...P
|
||||
// 00000020: 7df0 5075 6cde fa50 4b65 adb3 bcfb 0100 }.Pul..PKe......
|
||||
// 00000030: 0001 7700 0130 0012 4d49 542d 4d41 4749 ..w..0..MIT-MAGI
|
||||
// 00000040: 432d 434f 4f4b 4945 2d31 0010 267b 37f6 C-COOKIE-1..&{7.
|
||||
// 00000050: cbc1 1b97 beb8 26bb 1aab 8570 0100 0001 ......&....p....
|
||||
// 00000060: 7700 0130 0012 4d49 542d 4d41 4749 432d w..0..MIT-MAGIC-
|
||||
// 00000070: 434f 4f4b 4945 2d31 0010 516e 22e2 b11d COOKIE-1..Qn"...
|
||||
// 00000080: 1bd0 115d ff09 c028 ca5c ...]...(.\
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x10, 0x80, 0x50,
|
||||
0x7d, 0xf0, 0x50, 0x75, 0x6c, 0xde, 0xfa, 0x50,
|
||||
0x4b, 0x65, 0xad, 0xb3, 0xbc, 0xfb, 0x01, 0x00,
|
||||
0x00, 0x01, 0x77, 0x00, 0x01, 0x30, 0x00, 0x12,
|
||||
0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41, 0x47, 0x49,
|
||||
0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b, 0x49, 0x45,
|
||||
0x2d, 0x31, 0x00, 0x10, 0x26, 0x7b, 0x37, 0xf6,
|
||||
0xcb, 0xc1, 0x1b, 0x97, 0xbe, 0xb8, 0x26, 0xbb,
|
||||
0x1a, 0xab, 0x85, 0x70, 0x01, 0x00, 0x00, 0x01,
|
||||
0x77, 0x00, 0x01, 0x30, 0x00, 0x12, 0x4d, 0x49,
|
||||
0x54, 0x2d, 0x4d, 0x41, 0x47, 0x49, 0x43, 0x2d,
|
||||
0x43, 0x4f, 0x4f, 0x4b, 0x49, 0x45, 0x2d, 0x31,
|
||||
0x00, 0x10, 0x51, 0x6e, 0x22, 0xe2, 0xb1, 0x1d,
|
||||
0x1b, 0xd0, 0x11, 0x5d, 0xff, 0x09, 0xc0, 0x28,
|
||||
0xca, 0x5c,
|
||||
},
|
||||
wantAuthFile: []byte{
|
||||
// w/unix:0 MIT-MAGIC-COOKIE-1 516e5bc892b7162b844abd1fc1a7c16e
|
||||
//
|
||||
// 00000000: 0100 0001 7700 0130 0012 4d49 542d 4d41 ....w..0..MIT-MA
|
||||
// 00000010: 4749 432d 434f 4f4b 4945 2d31 0010 516e GIC-COOKIE-1..Qn
|
||||
// 00000020: 5bc8 92b7 162b 844a bd1f c1a7 c16e [....+.J.....n
|
||||
0x01, 0x00, 0x00, 0x01, 0x77, 0x00, 0x01, 0x30,
|
||||
0x00, 0x12, 0x4d, 0x49, 0x54, 0x2d, 0x4d, 0x41,
|
||||
0x47, 0x49, 0x43, 0x2d, 0x43, 0x4f, 0x4f, 0x4b,
|
||||
0x49, 0x45, 0x2d, 0x31, 0x00, 0x10, 0x51, 0x6e,
|
||||
0x5b, 0xc8, 0x92, 0xb7, 0x16, 0x2b, 0x84, 0x4a,
|
||||
0xbd, 0x1f, 0xc1, 0xa7, 0xc1, 0x6e,
|
||||
},
|
||||
entries: []testEntry{
|
||||
{
|
||||
address: "w",
|
||||
display: "0",
|
||||
authProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
authCookie: "516e5bc892b7162b844abd1fc1a7c16e",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
homedir, err := os.UserHomeDir()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
if tt.authFile != nil {
|
||||
err := afero.WriteFile(fs, filepath.Join(homedir, ".Xauthority"), tt.authFile, 0o600)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, entry := range tt.entries {
|
||||
err := addXauthEntry(context.Background(), fs, entry.address, entry.display, entry.authProtocol, entry.authCookie)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
gotAuthFile, err := afero.ReadFile(fs, filepath.Join(homedir, ".Xauthority"))
|
||||
require.NoError(t, err)
|
||||
|
||||
if diff := cmp.Diff(tt.wantAuthFile, gotAuthFile); diff != "" {
|
||||
assert.Failf(t, "addXauthEntry() mismatch", "(-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,9 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/agent/agentssh"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestServer_X11(t *testing.T) {
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
package agenttest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
// New starts a new agent for use in tests.
|
||||
// The agent will use the provided coder URL and session token.
|
||||
// The options passed to agent.New() can be modified by passing an optional
|
||||
// variadic func(*agent.Options).
|
||||
// Returns the agent. Closing the agent is handled by the test cleanup.
|
||||
// It is the responsibility of the caller to call coderdtest.AwaitWorkspaceAgents
|
||||
// to ensure agent is connected.
|
||||
func New(t testing.TB, coderURL *url.URL, agentToken string, opts ...func(*agent.Options)) agent.Agent {
|
||||
t.Helper()
|
||||
|
||||
var o agent.Options
|
||||
log := slogtest.Make(t, nil).Leveled(slog.LevelDebug).Named("agent")
|
||||
o.Logger = log
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(&o)
|
||||
}
|
||||
|
||||
if o.Client == nil {
|
||||
agentClient := agentsdk.New(coderURL)
|
||||
agentClient.SetSessionToken(agentToken)
|
||||
agentClient.SDK.SetLogger(log)
|
||||
o.Client = agentClient
|
||||
}
|
||||
|
||||
if o.ExchangeToken == nil {
|
||||
o.ExchangeToken = func(_ context.Context) (string, error) {
|
||||
return agentToken, nil
|
||||
}
|
||||
}
|
||||
|
||||
if o.LogDir == "" {
|
||||
o.LogDir = t.TempDir()
|
||||
}
|
||||
|
||||
agt := agent.New(o)
|
||||
t.Cleanup(func() {
|
||||
assert.NoError(t, agt.Close(), "failed to close agent during cleanup")
|
||||
})
|
||||
|
||||
return agt
|
||||
}
|
||||
+75
-157
@@ -3,28 +3,20 @@ package agenttest
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
"storj.io/drpc"
|
||||
"storj.io/drpc/drpcmux"
|
||||
"storj.io/drpc/drpcserver"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
"cdr.dev/slog"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
drpcsdk "github.com/coder/coder/v2/codersdk/drpc"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/proto"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/tailnet"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func NewClient(t testing.TB,
|
||||
@@ -37,88 +29,54 @@ func NewClient(t testing.TB,
|
||||
if manifest.AgentID == uuid.Nil {
|
||||
manifest.AgentID = agentID
|
||||
}
|
||||
coordPtr := atomic.Pointer[tailnet.Coordinator]{}
|
||||
coordPtr.Store(&coordinator)
|
||||
mux := drpcmux.New()
|
||||
derpMapUpdates := make(chan *tailcfg.DERPMap)
|
||||
drpcService := &tailnet.DRPCService{
|
||||
CoordPtr: &coordPtr,
|
||||
Logger: logger,
|
||||
DerpMapUpdateFrequency: time.Microsecond,
|
||||
DerpMapFn: func() *tailcfg.DERPMap { return <-derpMapUpdates },
|
||||
}
|
||||
err := proto.DRPCRegisterTailnet(mux, drpcService)
|
||||
require.NoError(t, err)
|
||||
mp, err := agentsdk.ProtoFromManifest(manifest)
|
||||
require.NoError(t, err)
|
||||
fakeAAPI := NewFakeAgentAPI(t, logger, mp)
|
||||
err = agentproto.DRPCRegisterAgent(mux, fakeAAPI)
|
||||
require.NoError(t, err)
|
||||
server := drpcserver.NewWithOptions(mux, drpcserver.Options{
|
||||
Log: func(err error) {
|
||||
if xerrors.Is(err, io.EOF) {
|
||||
return
|
||||
}
|
||||
logger.Debug(context.Background(), "drpc server error", slog.Error(err))
|
||||
},
|
||||
})
|
||||
return &Client{
|
||||
t: t,
|
||||
logger: logger.Named("client"),
|
||||
agentID: agentID,
|
||||
manifest: manifest,
|
||||
statsChan: statsChan,
|
||||
coordinator: coordinator,
|
||||
server: server,
|
||||
fakeAgentAPI: fakeAAPI,
|
||||
derpMapUpdates: derpMapUpdates,
|
||||
derpMapUpdates: make(chan agentsdk.DERPMapUpdate),
|
||||
}
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
t testing.TB
|
||||
logger slog.Logger
|
||||
agentID uuid.UUID
|
||||
metadata map[string]agentsdk.Metadata
|
||||
statsChan chan *agentsdk.Stats
|
||||
coordinator tailnet.Coordinator
|
||||
server *drpcserver.Server
|
||||
fakeAgentAPI *FakeAgentAPI
|
||||
LastWorkspaceAgent func()
|
||||
PatchWorkspaceLogs func() error
|
||||
t testing.TB
|
||||
logger slog.Logger
|
||||
agentID uuid.UUID
|
||||
manifest agentsdk.Manifest
|
||||
metadata map[string]agentsdk.PostMetadataRequest
|
||||
statsChan chan *agentsdk.Stats
|
||||
coordinator tailnet.Coordinator
|
||||
LastWorkspaceAgent func()
|
||||
PatchWorkspaceLogs func() error
|
||||
GetServiceBannerFunc func() (codersdk.ServiceBannerConfig, error)
|
||||
|
||||
mu sync.Mutex // Protects following.
|
||||
lifecycleStates []codersdk.WorkspaceAgentLifecycle
|
||||
startup agentsdk.PostStartupRequest
|
||||
logs []agentsdk.Log
|
||||
derpMapUpdates chan *tailcfg.DERPMap
|
||||
derpMapOnce sync.Once
|
||||
derpMapUpdates chan agentsdk.DERPMapUpdate
|
||||
}
|
||||
|
||||
func (*Client) RewriteDERPMap(*tailcfg.DERPMap) {}
|
||||
|
||||
func (c *Client) Close() {
|
||||
c.derpMapOnce.Do(func() { close(c.derpMapUpdates) })
|
||||
func (c *Client) Manifest(_ context.Context) (agentsdk.Manifest, error) {
|
||||
return c.manifest, nil
|
||||
}
|
||||
|
||||
func (c *Client) ConnectRPC(ctx context.Context) (drpc.Conn, error) {
|
||||
conn, lis := drpcsdk.MemTransportPipe()
|
||||
func (c *Client) Listen(_ context.Context) (net.Conn, error) {
|
||||
clientConn, serverConn := net.Pipe()
|
||||
closed := make(chan struct{})
|
||||
c.LastWorkspaceAgent = func() {
|
||||
_ = conn.Close()
|
||||
_ = lis.Close()
|
||||
_ = serverConn.Close()
|
||||
_ = clientConn.Close()
|
||||
<-closed
|
||||
}
|
||||
c.t.Cleanup(c.LastWorkspaceAgent)
|
||||
serveCtx, cancel := context.WithCancel(ctx)
|
||||
c.t.Cleanup(cancel)
|
||||
auth := tailnet.AgentTunnelAuth{}
|
||||
streamID := tailnet.StreamID{
|
||||
Name: "agenttest",
|
||||
ID: c.agentID,
|
||||
Auth: auth,
|
||||
}
|
||||
serveCtx = tailnet.WithStreamID(serveCtx, streamID)
|
||||
go func() {
|
||||
_ = c.server.Serve(serveCtx, lis)
|
||||
_ = c.coordinator.ServeAgent(serverConn, c.agentID, "")
|
||||
close(closed)
|
||||
}()
|
||||
return conn, nil
|
||||
return clientConn, nil
|
||||
}
|
||||
|
||||
func (c *Client) ReportStats(ctx context.Context, _ slog.Logger, statsChan <-chan *agentsdk.Stats, setInterval func(time.Duration)) (io.Closer, error) {
|
||||
@@ -167,26 +125,39 @@ func (c *Client) PostLifecycle(ctx context.Context, req agentsdk.PostLifecycleRe
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) GetStartup() <-chan *agentproto.Startup {
|
||||
return c.fakeAgentAPI.startupCh
|
||||
func (c *Client) PostAppHealth(ctx context.Context, req agentsdk.PostAppHealthsRequest) error {
|
||||
c.logger.Debug(ctx, "post app health", slog.F("req", req))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) GetMetadata() map[string]agentsdk.Metadata {
|
||||
func (c *Client) GetStartup() agentsdk.PostStartupRequest {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.startup
|
||||
}
|
||||
|
||||
func (c *Client) GetMetadata() map[string]agentsdk.PostMetadataRequest {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return maps.Clone(c.metadata)
|
||||
}
|
||||
|
||||
func (c *Client) PostMetadata(ctx context.Context, req agentsdk.PostMetadataRequest) error {
|
||||
func (c *Client) PostMetadata(ctx context.Context, key string, req agentsdk.PostMetadataRequest) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.metadata == nil {
|
||||
c.metadata = make(map[string]agentsdk.Metadata)
|
||||
}
|
||||
for _, md := range req.Metadata {
|
||||
c.metadata[md.Key] = md
|
||||
c.logger.Debug(ctx, "post metadata", slog.F("key", md.Key), slog.F("md", md))
|
||||
c.metadata = make(map[string]agentsdk.PostMetadataRequest)
|
||||
}
|
||||
c.metadata[key] = req
|
||||
c.logger.Debug(ctx, "post metadata", slog.F("key", key), slog.F("req", req))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) PostStartup(ctx context.Context, startup agentsdk.PostStartupRequest) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.startup = startup
|
||||
c.logger.Debug(ctx, "post startup", slog.F("req", startup))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -208,10 +179,23 @@ func (c *Client) PatchLogs(ctx context.Context, logs agentsdk.PatchLogs) error {
|
||||
}
|
||||
|
||||
func (c *Client) SetServiceBannerFunc(f func() (codersdk.ServiceBannerConfig, error)) {
|
||||
c.fakeAgentAPI.SetServiceBannerFunc(f)
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.GetServiceBannerFunc = f
|
||||
}
|
||||
|
||||
func (c *Client) PushDERPMapUpdate(update *tailcfg.DERPMap) error {
|
||||
func (c *Client) GetServiceBanner(ctx context.Context) (codersdk.ServiceBannerConfig, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.logger.Debug(ctx, "get service banner")
|
||||
if c.GetServiceBannerFunc != nil {
|
||||
return c.GetServiceBannerFunc()
|
||||
}
|
||||
return codersdk.ServiceBannerConfig{}, nil
|
||||
}
|
||||
|
||||
func (c *Client) PushDERPMapUpdate(update agentsdk.DERPMapUpdate) error {
|
||||
timer := time.NewTimer(testutil.WaitShort)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
@@ -223,82 +207,16 @@ func (c *Client) PushDERPMapUpdate(update *tailcfg.DERPMap) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) DERPMapUpdates(_ context.Context) (<-chan agentsdk.DERPMapUpdate, io.Closer, error) {
|
||||
closed := make(chan struct{})
|
||||
return c.derpMapUpdates, closeFunc(func() error {
|
||||
close(closed)
|
||||
return nil
|
||||
}), nil
|
||||
}
|
||||
|
||||
type closeFunc func() error
|
||||
|
||||
func (c closeFunc) Close() error {
|
||||
return c()
|
||||
}
|
||||
|
||||
type FakeAgentAPI struct {
|
||||
sync.Mutex
|
||||
t testing.TB
|
||||
logger slog.Logger
|
||||
|
||||
manifest *agentproto.Manifest
|
||||
startupCh chan *agentproto.Startup
|
||||
|
||||
getServiceBannerFunc func() (codersdk.ServiceBannerConfig, error)
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) GetManifest(context.Context, *agentproto.GetManifestRequest) (*agentproto.Manifest, error) {
|
||||
return f.manifest, nil
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) SetServiceBannerFunc(fn func() (codersdk.ServiceBannerConfig, error)) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.getServiceBannerFunc = fn
|
||||
f.logger.Info(context.Background(), "updated ServiceBannerFunc")
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) GetServiceBanner(context.Context, *agentproto.GetServiceBannerRequest) (*agentproto.ServiceBanner, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.getServiceBannerFunc == nil {
|
||||
return &agentproto.ServiceBanner{}, nil
|
||||
}
|
||||
sb, err := f.getServiceBannerFunc()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return agentsdk.ProtoFromServiceBanner(sb), nil
|
||||
}
|
||||
|
||||
func (*FakeAgentAPI) UpdateStats(context.Context, *agentproto.UpdateStatsRequest) (*agentproto.UpdateStatsResponse, error) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (*FakeAgentAPI) UpdateLifecycle(context.Context, *agentproto.UpdateLifecycleRequest) (*agentproto.Lifecycle, error) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) BatchUpdateAppHealths(ctx context.Context, req *agentproto.BatchUpdateAppHealthRequest) (*agentproto.BatchUpdateAppHealthResponse, error) {
|
||||
f.logger.Debug(ctx, "batch update app health", slog.F("req", req))
|
||||
return &agentproto.BatchUpdateAppHealthResponse{}, nil
|
||||
}
|
||||
|
||||
func (f *FakeAgentAPI) UpdateStartup(_ context.Context, req *agentproto.UpdateStartupRequest) (*agentproto.Startup, error) {
|
||||
f.startupCh <- req.GetStartup()
|
||||
return req.GetStartup(), nil
|
||||
}
|
||||
|
||||
func (*FakeAgentAPI) BatchUpdateMetadata(context.Context, *agentproto.BatchUpdateMetadataRequest) (*agentproto.BatchUpdateMetadataResponse, error) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (*FakeAgentAPI) BatchCreateLogs(context.Context, *agentproto.BatchCreateLogsRequest) (*agentproto.BatchCreateLogsResponse, error) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func NewFakeAgentAPI(t testing.TB, logger slog.Logger, manifest *agentproto.Manifest) *FakeAgentAPI {
|
||||
return &FakeAgentAPI{
|
||||
t: t,
|
||||
logger: logger.Named("FakeAgentAPI"),
|
||||
manifest: manifest,
|
||||
startupCh: make(chan *agentproto.Startup, 100),
|
||||
}
|
||||
}
|
||||
|
||||
+7
-20
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/coderd/httpapi"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
func (a *agent) apiHandler() http.Handler {
|
||||
@@ -26,30 +26,17 @@ func (a *agent) apiHandler() http.Handler {
|
||||
cpy[k] = b
|
||||
}
|
||||
|
||||
cacheDuration := 1 * time.Second
|
||||
if a.portCacheDuration > 0 {
|
||||
cacheDuration = a.portCacheDuration
|
||||
}
|
||||
|
||||
lp := &listeningPortsHandler{
|
||||
ignorePorts: cpy,
|
||||
cacheDuration: cacheDuration,
|
||||
}
|
||||
lp := &listeningPortsHandler{ignorePorts: cpy}
|
||||
r.Get("/api/v0/listening-ports", lp.handler)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
type listeningPortsHandler struct {
|
||||
ignorePorts map[int]string
|
||||
cacheDuration time.Duration
|
||||
|
||||
//nolint: unused // used on some but not all platforms
|
||||
mut sync.Mutex
|
||||
//nolint: unused // used on some but not all platforms
|
||||
ports []codersdk.WorkspaceAgentListeningPort
|
||||
//nolint: unused // used on some but not all platforms
|
||||
mtime time.Time
|
||||
mut sync.Mutex
|
||||
ports []codersdk.WorkspaceAgentListeningPort
|
||||
mtime time.Time
|
||||
ignorePorts map[int]string
|
||||
}
|
||||
|
||||
// handler returns a list of listening ports. This is tested by coderd's
|
||||
|
||||
+2
-2
@@ -10,8 +10,8 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/retry"
|
||||
)
|
||||
|
||||
|
||||
@@ -13,11 +13,11 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/agent"
|
||||
"github.com/coder/coder/coderd/httpapi"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestAppHealth_Healthy(t *testing.T) {
|
||||
|
||||
+1
-13
@@ -11,15 +11,12 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
type agentMetrics struct {
|
||||
connectionsTotal prometheus.Counter
|
||||
reconnectingPTYErrors *prometheus.CounterVec
|
||||
// startupScriptSeconds is the time in seconds that the start script(s)
|
||||
// took to run. This is reported once per agent.
|
||||
startupScriptSeconds *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
func newAgentMetrics(registerer prometheus.Registerer) *agentMetrics {
|
||||
@@ -38,18 +35,9 @@ func newAgentMetrics(registerer prometheus.Registerer) *agentMetrics {
|
||||
)
|
||||
registerer.MustRegister(reconnectingPTYErrors)
|
||||
|
||||
startupScriptSeconds := prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: "coderd",
|
||||
Subsystem: "agentstats",
|
||||
Name: "startup_script_seconds",
|
||||
Help: "Amount of time taken to run the startup script in seconds.",
|
||||
}, []string{"success"})
|
||||
registerer.MustRegister(startupScriptSeconds)
|
||||
|
||||
return &agentMetrics{
|
||||
connectionsTotal: connectionsTotal,
|
||||
reconnectingPTYErrors: reconnectingPTYErrors,
|
||||
startupScriptSeconds: startupScriptSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,14 +8,14 @@ import (
|
||||
"github.com/cakturk/go-netstat/netstat"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
func (lp *listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
|
||||
lp.mut.Lock()
|
||||
defer lp.mut.Unlock()
|
||||
|
||||
if time.Since(lp.mtime) < lp.cacheDuration {
|
||||
if time.Since(lp.mtime) < time.Second {
|
||||
// copy
|
||||
ports := make([]codersdk.WorkspaceAgentListeningPort, len(lp.ports))
|
||||
copy(ports, lp.ports)
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
package agent
|
||||
|
||||
import "github.com/coder/coder/v2/codersdk"
|
||||
import "github.com/coder/coder/codersdk"
|
||||
|
||||
func (*listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
|
||||
func (lp *listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
|
||||
// Can't scan for ports on non-linux or non-windows_amd64 systems at the
|
||||
// moment. The UI will not show any "no ports found" message to the user, so
|
||||
// the user won't suspect a thing.
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,261 +0,0 @@
|
||||
syntax = "proto3";
|
||||
option go_package = "github.com/coder/coder/v2/agent/proto";
|
||||
|
||||
package coder.agent.v2;
|
||||
|
||||
import "tailnet/proto/tailnet.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
message WorkspaceApp {
|
||||
bytes id = 1;
|
||||
string url = 2;
|
||||
bool external = 3;
|
||||
string slug = 4;
|
||||
string display_name = 5;
|
||||
string command = 6;
|
||||
string icon = 7;
|
||||
bool subdomain = 8;
|
||||
string subdomain_name = 9;
|
||||
|
||||
enum SharingLevel {
|
||||
SHARING_LEVEL_UNSPECIFIED = 0;
|
||||
OWNER = 1;
|
||||
AUTHENTICATED = 2;
|
||||
PUBLIC = 3;
|
||||
}
|
||||
SharingLevel sharing_level = 10;
|
||||
|
||||
message Healthcheck {
|
||||
string url = 1;
|
||||
google.protobuf.Duration interval = 2;
|
||||
int32 threshold = 3;
|
||||
}
|
||||
Healthcheck healthcheck = 11;
|
||||
|
||||
enum Health {
|
||||
HEALTH_UNSPECIFIED = 0;
|
||||
DISABLED = 1;
|
||||
INITIALIZING = 2;
|
||||
HEALTHY = 3;
|
||||
UNHEALTHY = 4;
|
||||
}
|
||||
Health health = 12;
|
||||
}
|
||||
|
||||
message WorkspaceAgentScript {
|
||||
bytes log_source_id = 1;
|
||||
string log_path = 2;
|
||||
string script = 3;
|
||||
string cron = 4;
|
||||
bool run_on_start = 5;
|
||||
bool run_on_stop = 6;
|
||||
bool start_blocks_login = 7;
|
||||
google.protobuf.Duration timeout = 8;
|
||||
}
|
||||
|
||||
message WorkspaceAgentMetadata {
|
||||
message Result {
|
||||
google.protobuf.Timestamp collected_at = 1;
|
||||
int64 age = 2;
|
||||
string value = 3;
|
||||
string error = 4;
|
||||
}
|
||||
Result result = 1;
|
||||
|
||||
message Description {
|
||||
string display_name = 1;
|
||||
string key = 2;
|
||||
string script = 3;
|
||||
google.protobuf.Duration interval = 4;
|
||||
google.protobuf.Duration timeout = 5;
|
||||
}
|
||||
Description description = 2;
|
||||
}
|
||||
|
||||
message Manifest {
|
||||
bytes agent_id = 1;
|
||||
string agent_name = 15;
|
||||
string owner_username = 13;
|
||||
bytes workspace_id = 14;
|
||||
string workspace_name = 16;
|
||||
uint32 git_auth_configs = 2;
|
||||
map<string, string> environment_variables = 3;
|
||||
string directory = 4;
|
||||
string vs_code_port_proxy_uri = 5;
|
||||
string motd_path = 6;
|
||||
bool disable_direct_connections = 7;
|
||||
bool derp_force_websockets = 8;
|
||||
|
||||
coder.tailnet.v2.DERPMap derp_map = 9;
|
||||
repeated WorkspaceAgentScript scripts = 10;
|
||||
repeated WorkspaceApp apps = 11;
|
||||
repeated WorkspaceAgentMetadata.Description metadata = 12;
|
||||
}
|
||||
|
||||
message GetManifestRequest {}
|
||||
|
||||
message ServiceBanner {
|
||||
bool enabled = 1;
|
||||
string message = 2;
|
||||
string background_color = 3;
|
||||
}
|
||||
|
||||
message GetServiceBannerRequest {}
|
||||
|
||||
message Stats {
|
||||
// ConnectionsByProto is a count of connections by protocol.
|
||||
map<string, int64> connections_by_proto = 1;
|
||||
// ConnectionCount is the number of connections received by an agent.
|
||||
int64 connection_count = 2;
|
||||
// ConnectionMedianLatencyMS is the median latency of all connections in milliseconds.
|
||||
double connection_median_latency_ms = 3;
|
||||
// RxPackets is the number of received packets.
|
||||
int64 rx_packets = 4;
|
||||
// RxBytes is the number of received bytes.
|
||||
int64 rx_bytes = 5;
|
||||
// TxPackets is the number of transmitted bytes.
|
||||
int64 tx_packets = 6;
|
||||
// TxBytes is the number of transmitted bytes.
|
||||
int64 tx_bytes = 7;
|
||||
|
||||
// SessionCountVSCode is the number of connections received by an agent
|
||||
// that are from our VS Code extension.
|
||||
int64 session_count_vscode = 8;
|
||||
// SessionCountJetBrains is the number of connections received by an agent
|
||||
// that are from our JetBrains extension.
|
||||
int64 session_count_jetbrains = 9;
|
||||
// SessionCountReconnectingPTY is the number of connections received by an agent
|
||||
// that are from the reconnecting web terminal.
|
||||
int64 session_count_reconnecting_pty = 10;
|
||||
// SessionCountSSH is the number of connections received by an agent
|
||||
// that are normal, non-tagged SSH sessions.
|
||||
int64 session_count_ssh = 11;
|
||||
|
||||
message Metric {
|
||||
string name = 1;
|
||||
|
||||
enum Type {
|
||||
TYPE_UNSPECIFIED = 0;
|
||||
COUNTER = 1;
|
||||
GAUGE = 2;
|
||||
}
|
||||
Type type = 2;
|
||||
|
||||
double value = 3;
|
||||
|
||||
message Label {
|
||||
string name = 1;
|
||||
string value = 2;
|
||||
}
|
||||
repeated Label labels = 4;
|
||||
}
|
||||
repeated Metric metrics = 12;
|
||||
}
|
||||
|
||||
message UpdateStatsRequest{
|
||||
Stats stats = 1;
|
||||
}
|
||||
|
||||
message UpdateStatsResponse {
|
||||
google.protobuf.Duration report_interval = 1;
|
||||
}
|
||||
|
||||
message Lifecycle {
|
||||
enum State {
|
||||
STATE_UNSPECIFIED = 0;
|
||||
CREATED = 1;
|
||||
STARTING = 2;
|
||||
START_TIMEOUT = 3;
|
||||
START_ERROR = 4;
|
||||
READY = 5;
|
||||
SHUTTING_DOWN = 6;
|
||||
SHUTDOWN_TIMEOUT = 7;
|
||||
SHUTDOWN_ERROR = 8;
|
||||
OFF = 9;
|
||||
}
|
||||
State state = 1;
|
||||
google.protobuf.Timestamp changed_at = 2;
|
||||
}
|
||||
|
||||
message UpdateLifecycleRequest {
|
||||
Lifecycle lifecycle = 1;
|
||||
}
|
||||
|
||||
enum AppHealth {
|
||||
APP_HEALTH_UNSPECIFIED = 0;
|
||||
DISABLED = 1;
|
||||
INITIALIZING = 2;
|
||||
HEALTHY = 3;
|
||||
UNHEALTHY = 4;
|
||||
}
|
||||
|
||||
message BatchUpdateAppHealthRequest {
|
||||
message HealthUpdate {
|
||||
bytes id = 1;
|
||||
AppHealth health = 2;
|
||||
}
|
||||
repeated HealthUpdate updates = 1;
|
||||
}
|
||||
|
||||
message BatchUpdateAppHealthResponse {}
|
||||
|
||||
message Startup {
|
||||
string version = 1;
|
||||
string expanded_directory = 2;
|
||||
enum Subsystem {
|
||||
SUBSYSTEM_UNSPECIFIED = 0;
|
||||
ENVBOX = 1;
|
||||
ENVBUILDER = 2;
|
||||
EXECTRACE = 3;
|
||||
}
|
||||
repeated Subsystem subsystems = 3;
|
||||
}
|
||||
|
||||
message UpdateStartupRequest{
|
||||
Startup startup = 1;
|
||||
}
|
||||
|
||||
message Metadata {
|
||||
string key = 1;
|
||||
WorkspaceAgentMetadata.Result result = 2;
|
||||
}
|
||||
|
||||
message BatchUpdateMetadataRequest {
|
||||
repeated Metadata metadata = 2;
|
||||
}
|
||||
|
||||
message BatchUpdateMetadataResponse {}
|
||||
|
||||
message Log {
|
||||
google.protobuf.Timestamp created_at = 1;
|
||||
string output = 2;
|
||||
|
||||
enum Level {
|
||||
LEVEL_UNSPECIFIED = 0;
|
||||
TRACE = 1;
|
||||
DEBUG = 2;
|
||||
INFO = 3;
|
||||
WARN = 4;
|
||||
ERROR = 5;
|
||||
}
|
||||
Level level = 3;
|
||||
}
|
||||
|
||||
message BatchCreateLogsRequest {
|
||||
bytes log_source_id = 1;
|
||||
repeated Log logs = 2;
|
||||
}
|
||||
|
||||
message BatchCreateLogsResponse {}
|
||||
|
||||
service Agent {
|
||||
rpc GetManifest(GetManifestRequest) returns (Manifest);
|
||||
rpc GetServiceBanner(GetServiceBannerRequest) returns (ServiceBanner);
|
||||
rpc UpdateStats(UpdateStatsRequest) returns (UpdateStatsResponse);
|
||||
rpc UpdateLifecycle(UpdateLifecycleRequest) returns (Lifecycle);
|
||||
rpc BatchUpdateAppHealths(BatchUpdateAppHealthRequest) returns (BatchUpdateAppHealthResponse);
|
||||
rpc UpdateStartup(UpdateStartupRequest) returns (Startup);
|
||||
rpc BatchUpdateMetadata(BatchUpdateMetadataRequest) returns (BatchUpdateMetadataResponse);
|
||||
rpc BatchCreateLogs(BatchCreateLogsRequest) returns (BatchCreateLogsResponse);
|
||||
}
|
||||
@@ -1,391 +0,0 @@
|
||||
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
|
||||
// protoc-gen-go-drpc version: v0.0.33
|
||||
// source: agent/proto/agent.proto
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
errors "errors"
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
drpc "storj.io/drpc"
|
||||
drpcerr "storj.io/drpc/drpcerr"
|
||||
)
|
||||
|
||||
type drpcEncoding_File_agent_proto_agent_proto struct{}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) Marshal(msg drpc.Message) ([]byte, error) {
|
||||
return proto.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
|
||||
return proto.MarshalOptions{}.MarshalAppend(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) Unmarshal(buf []byte, msg drpc.Message) error {
|
||||
return proto.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
|
||||
return protojson.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
|
||||
return protojson.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
type DRPCAgentClient interface {
|
||||
DRPCConn() drpc.Conn
|
||||
|
||||
GetManifest(ctx context.Context, in *GetManifestRequest) (*Manifest, error)
|
||||
GetServiceBanner(ctx context.Context, in *GetServiceBannerRequest) (*ServiceBanner, error)
|
||||
UpdateStats(ctx context.Context, in *UpdateStatsRequest) (*UpdateStatsResponse, error)
|
||||
UpdateLifecycle(ctx context.Context, in *UpdateLifecycleRequest) (*Lifecycle, error)
|
||||
BatchUpdateAppHealths(ctx context.Context, in *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error)
|
||||
UpdateStartup(ctx context.Context, in *UpdateStartupRequest) (*Startup, error)
|
||||
BatchUpdateMetadata(ctx context.Context, in *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error)
|
||||
BatchCreateLogs(ctx context.Context, in *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error)
|
||||
}
|
||||
|
||||
type drpcAgentClient struct {
|
||||
cc drpc.Conn
|
||||
}
|
||||
|
||||
func NewDRPCAgentClient(cc drpc.Conn) DRPCAgentClient {
|
||||
return &drpcAgentClient{cc}
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) DRPCConn() drpc.Conn { return c.cc }
|
||||
|
||||
func (c *drpcAgentClient) GetManifest(ctx context.Context, in *GetManifestRequest) (*Manifest, error) {
|
||||
out := new(Manifest)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/GetManifest", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) GetServiceBanner(ctx context.Context, in *GetServiceBannerRequest) (*ServiceBanner, error) {
|
||||
out := new(ServiceBanner)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/GetServiceBanner", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateStats(ctx context.Context, in *UpdateStatsRequest) (*UpdateStatsResponse, error) {
|
||||
out := new(UpdateStatsResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateStats", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateLifecycle(ctx context.Context, in *UpdateLifecycleRequest) (*Lifecycle, error) {
|
||||
out := new(Lifecycle)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateLifecycle", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchUpdateAppHealths(ctx context.Context, in *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error) {
|
||||
out := new(BatchUpdateAppHealthResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchUpdateAppHealths", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateStartup(ctx context.Context, in *UpdateStartupRequest) (*Startup, error) {
|
||||
out := new(Startup)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateStartup", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchUpdateMetadata(ctx context.Context, in *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error) {
|
||||
out := new(BatchUpdateMetadataResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchUpdateMetadata", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchCreateLogs(ctx context.Context, in *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error) {
|
||||
out := new(BatchCreateLogsResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchCreateLogs", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type DRPCAgentServer interface {
|
||||
GetManifest(context.Context, *GetManifestRequest) (*Manifest, error)
|
||||
GetServiceBanner(context.Context, *GetServiceBannerRequest) (*ServiceBanner, error)
|
||||
UpdateStats(context.Context, *UpdateStatsRequest) (*UpdateStatsResponse, error)
|
||||
UpdateLifecycle(context.Context, *UpdateLifecycleRequest) (*Lifecycle, error)
|
||||
BatchUpdateAppHealths(context.Context, *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error)
|
||||
UpdateStartup(context.Context, *UpdateStartupRequest) (*Startup, error)
|
||||
BatchUpdateMetadata(context.Context, *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error)
|
||||
BatchCreateLogs(context.Context, *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error)
|
||||
}
|
||||
|
||||
type DRPCAgentUnimplementedServer struct{}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) GetManifest(context.Context, *GetManifestRequest) (*Manifest, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) GetServiceBanner(context.Context, *GetServiceBannerRequest) (*ServiceBanner, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateStats(context.Context, *UpdateStatsRequest) (*UpdateStatsResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateLifecycle(context.Context, *UpdateLifecycleRequest) (*Lifecycle, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchUpdateAppHealths(context.Context, *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateStartup(context.Context, *UpdateStartupRequest) (*Startup, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchUpdateMetadata(context.Context, *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchCreateLogs(context.Context, *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
type DRPCAgentDescription struct{}
|
||||
|
||||
func (DRPCAgentDescription) NumMethods() int { return 8 }
|
||||
|
||||
func (DRPCAgentDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
|
||||
switch n {
|
||||
case 0:
|
||||
return "/coder.agent.v2.Agent/GetManifest", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
GetManifest(
|
||||
ctx,
|
||||
in1.(*GetManifestRequest),
|
||||
)
|
||||
}, DRPCAgentServer.GetManifest, true
|
||||
case 1:
|
||||
return "/coder.agent.v2.Agent/GetServiceBanner", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
GetServiceBanner(
|
||||
ctx,
|
||||
in1.(*GetServiceBannerRequest),
|
||||
)
|
||||
}, DRPCAgentServer.GetServiceBanner, true
|
||||
case 2:
|
||||
return "/coder.agent.v2.Agent/UpdateStats", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
UpdateStats(
|
||||
ctx,
|
||||
in1.(*UpdateStatsRequest),
|
||||
)
|
||||
}, DRPCAgentServer.UpdateStats, true
|
||||
case 3:
|
||||
return "/coder.agent.v2.Agent/UpdateLifecycle", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
UpdateLifecycle(
|
||||
ctx,
|
||||
in1.(*UpdateLifecycleRequest),
|
||||
)
|
||||
}, DRPCAgentServer.UpdateLifecycle, true
|
||||
case 4:
|
||||
return "/coder.agent.v2.Agent/BatchUpdateAppHealths", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
BatchUpdateAppHealths(
|
||||
ctx,
|
||||
in1.(*BatchUpdateAppHealthRequest),
|
||||
)
|
||||
}, DRPCAgentServer.BatchUpdateAppHealths, true
|
||||
case 5:
|
||||
return "/coder.agent.v2.Agent/UpdateStartup", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
UpdateStartup(
|
||||
ctx,
|
||||
in1.(*UpdateStartupRequest),
|
||||
)
|
||||
}, DRPCAgentServer.UpdateStartup, true
|
||||
case 6:
|
||||
return "/coder.agent.v2.Agent/BatchUpdateMetadata", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
BatchUpdateMetadata(
|
||||
ctx,
|
||||
in1.(*BatchUpdateMetadataRequest),
|
||||
)
|
||||
}, DRPCAgentServer.BatchUpdateMetadata, true
|
||||
case 7:
|
||||
return "/coder.agent.v2.Agent/BatchCreateLogs", drpcEncoding_File_agent_proto_agent_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentServer).
|
||||
BatchCreateLogs(
|
||||
ctx,
|
||||
in1.(*BatchCreateLogsRequest),
|
||||
)
|
||||
}, DRPCAgentServer.BatchCreateLogs, true
|
||||
default:
|
||||
return "", nil, nil, nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func DRPCRegisterAgent(mux drpc.Mux, impl DRPCAgentServer) error {
|
||||
return mux.Register(impl, DRPCAgentDescription{})
|
||||
}
|
||||
|
||||
type DRPCAgent_GetManifestStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*Manifest) error
|
||||
}
|
||||
|
||||
type drpcAgent_GetManifestStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_GetManifestStream) SendAndClose(m *Manifest) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_GetServiceBannerStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*ServiceBanner) error
|
||||
}
|
||||
|
||||
type drpcAgent_GetServiceBannerStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_GetServiceBannerStream) SendAndClose(m *ServiceBanner) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_UpdateStatsStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*UpdateStatsResponse) error
|
||||
}
|
||||
|
||||
type drpcAgent_UpdateStatsStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_UpdateStatsStream) SendAndClose(m *UpdateStatsResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_UpdateLifecycleStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*Lifecycle) error
|
||||
}
|
||||
|
||||
type drpcAgent_UpdateLifecycleStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_UpdateLifecycleStream) SendAndClose(m *Lifecycle) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_BatchUpdateAppHealthsStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*BatchUpdateAppHealthResponse) error
|
||||
}
|
||||
|
||||
type drpcAgent_BatchUpdateAppHealthsStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_BatchUpdateAppHealthsStream) SendAndClose(m *BatchUpdateAppHealthResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_UpdateStartupStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*Startup) error
|
||||
}
|
||||
|
||||
type drpcAgent_UpdateStartupStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_UpdateStartupStream) SendAndClose(m *Startup) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_BatchUpdateMetadataStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*BatchUpdateMetadataResponse) error
|
||||
}
|
||||
|
||||
type drpcAgent_BatchUpdateMetadataStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_BatchUpdateMetadataStream) SendAndClose(m *BatchUpdateMetadataResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgent_BatchCreateLogsStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*BatchCreateLogsResponse) error
|
||||
}
|
||||
|
||||
type drpcAgent_BatchCreateLogsStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_BatchCreateLogsStream) SendAndClose(m *BatchCreateLogsResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package proto
|
||||
|
||||
func LabelsEqual(a, b []*Stats_Metric_Label) bool {
|
||||
am := make(map[string]string, len(a))
|
||||
for _, lbl := range a {
|
||||
v := lbl.GetValue()
|
||||
if v == "" {
|
||||
// Prometheus considers empty labels as equivalent to being absent
|
||||
continue
|
||||
}
|
||||
am[lbl.GetName()] = lbl.GetValue()
|
||||
}
|
||||
lenB := 0
|
||||
for _, lbl := range b {
|
||||
v := lbl.GetValue()
|
||||
if v == "" {
|
||||
// Prometheus considers empty labels as equivalent to being absent
|
||||
continue
|
||||
}
|
||||
lenB++
|
||||
if am[lbl.GetName()] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return len(am) == lenB
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
)
|
||||
|
||||
func TestLabelsEqual(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
a []*proto.Stats_Metric_Label
|
||||
b []*proto.Stats_Metric_Label
|
||||
eq bool
|
||||
}{
|
||||
{
|
||||
name: "mainlineEq",
|
||||
a: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
b: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
eq: true,
|
||||
},
|
||||
{
|
||||
name: "emptyValue",
|
||||
a: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
{Name: "singularity", Value: ""},
|
||||
},
|
||||
b: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
eq: true,
|
||||
},
|
||||
{
|
||||
name: "extra",
|
||||
a: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
{Name: "opacity", Value: "seyshells"},
|
||||
},
|
||||
b: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
eq: false,
|
||||
},
|
||||
{
|
||||
name: "different",
|
||||
a: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "sus"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
b: []*proto.Stats_Metric_Label{
|
||||
{Name: "credulity", Value: "legit"},
|
||||
{Name: "color", Value: "aquamarine"},
|
||||
},
|
||||
eq: false,
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
require.Equal(t, tc.eq, proto.LabelsEqual(tc.a, tc.b))
|
||||
require.Equal(t, tc.eq, proto.LabelsEqual(tc.b, tc.a))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
package proto
|
||||
|
||||
import (
|
||||
"github.com/coder/coder/v2/tailnet/proto"
|
||||
)
|
||||
|
||||
// CurrentVersion is the current version of the agent API. It is tied to the
|
||||
// tailnet API version to avoid confusion, since agents connect to the tailnet
|
||||
// API over the same websocket.
|
||||
var CurrentVersion = proto.CurrentVersion
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/hashicorp/go-reap"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/reaper"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/agent/reaper"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
// TestReap checks that's the reaper is successfully reaping
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/pty"
|
||||
)
|
||||
|
||||
// bufferedReconnectingPTY provides a reconnectable PTY by using a ring buffer to store
|
||||
|
||||
@@ -15,8 +15,8 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/pty"
|
||||
)
|
||||
|
||||
// attachTimeout is the initial timeout for attaching and will probably be far
|
||||
@@ -196,8 +196,8 @@ func (s *ptyState) waitForStateOrContext(ctx context.Context, state State) (Stat
|
||||
// until EOF or an error writing to ptty or reading from conn.
|
||||
func readConnLoop(ctx context.Context, conn net.Conn, ptty pty.PTYCmd, metrics *prometheus.CounterVec, logger slog.Logger) {
|
||||
decoder := json.NewDecoder(conn)
|
||||
var req codersdk.ReconnectingPTYRequest
|
||||
for {
|
||||
var req codersdk.ReconnectingPTYRequest
|
||||
err := decoder.Decode(&req)
|
||||
if xerrors.Is(err, io.EOF) {
|
||||
return
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/pty"
|
||||
)
|
||||
|
||||
// screenReconnectingPTY provides a reconnectable PTY via `screen`.
|
||||
@@ -206,13 +206,12 @@ func (rpty *screenReconnectingPTY) doAttach(ctx context.Context, conn net.Conn,
|
||||
cmd := pty.CommandContext(ctx, "screen", append([]string{
|
||||
// -S is for setting the session's name.
|
||||
"-S", rpty.id,
|
||||
// -U tells screen to use UTF-8 encoding.
|
||||
// -x allows attaching to an already attached session.
|
||||
// -RR reattaches to the daemon or creates the session daemon if missing.
|
||||
// -q disables the "New screen..." message that appears for five seconds
|
||||
// when creating a new session with -RR.
|
||||
// -c is the flag for the config file.
|
||||
"-UxRRqc", rpty.configFile,
|
||||
"-xRRqc", rpty.configFile,
|
||||
rpty.command.Path,
|
||||
// pty.Cmd duplicates Path as the first argument so remove it.
|
||||
}, rpty.command.Args[1:]...)...)
|
||||
|
||||
-126
@@ -1,126 +0,0 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/types/netlogtype"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
)
|
||||
|
||||
const maxConns = 2048
|
||||
|
||||
type networkStatsSource interface {
|
||||
SetConnStatsCallback(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts))
|
||||
}
|
||||
|
||||
type statsCollector interface {
|
||||
Collect(ctx context.Context, networkStats map[netlogtype.Connection]netlogtype.Counts) *proto.Stats
|
||||
}
|
||||
|
||||
type statsDest interface {
|
||||
UpdateStats(ctx context.Context, req *proto.UpdateStatsRequest) (*proto.UpdateStatsResponse, error)
|
||||
}
|
||||
|
||||
// statsReporter is a subcomponent of the agent that handles registering the stats callback on the
|
||||
// networkStatsSource (tailnet.Conn in prod), handling the callback, calling back to the
|
||||
// statsCollector (agent in prod) to collect additional stats, then sending the update to the
|
||||
// statsDest (agent API in prod)
|
||||
type statsReporter struct {
|
||||
*sync.Cond
|
||||
networkStats *map[netlogtype.Connection]netlogtype.Counts
|
||||
unreported bool
|
||||
lastInterval time.Duration
|
||||
|
||||
source networkStatsSource
|
||||
collector statsCollector
|
||||
logger slog.Logger
|
||||
}
|
||||
|
||||
func newStatsReporter(logger slog.Logger, source networkStatsSource, collector statsCollector) *statsReporter {
|
||||
return &statsReporter{
|
||||
Cond: sync.NewCond(&sync.Mutex{}),
|
||||
logger: logger,
|
||||
source: source,
|
||||
collector: collector,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *statsReporter) callback(_, _ time.Time, virtual, _ map[netlogtype.Connection]netlogtype.Counts) {
|
||||
s.L.Lock()
|
||||
defer s.L.Unlock()
|
||||
s.logger.Debug(context.Background(), "got stats callback")
|
||||
s.networkStats = &virtual
|
||||
s.unreported = true
|
||||
s.Broadcast()
|
||||
}
|
||||
|
||||
// reportLoop programs the source (tailnet.Conn) to send it stats via the
|
||||
// callback, then reports them to the dest.
|
||||
//
|
||||
// It's intended to be called within the larger retry loop that establishes a
|
||||
// connection to the agent API, then passes that connection to go routines like
|
||||
// this that use it. There is no retry and we fail on the first error since
|
||||
// this will be inside a larger retry loop.
|
||||
func (s *statsReporter) reportLoop(ctx context.Context, dest statsDest) error {
|
||||
// send an initial, blank report to get the interval
|
||||
resp, err := dest.UpdateStats(ctx, &proto.UpdateStatsRequest{})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("initial update: %w", err)
|
||||
}
|
||||
s.lastInterval = resp.ReportInterval.AsDuration()
|
||||
s.source.SetConnStatsCallback(s.lastInterval, maxConns, s.callback)
|
||||
|
||||
// use a separate goroutine to monitor the context so that we notice immediately, rather than
|
||||
// waiting for the next callback (which might never come if we are closing!)
|
||||
ctxDone := false
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
s.L.Lock()
|
||||
defer s.L.Unlock()
|
||||
ctxDone = true
|
||||
s.Broadcast()
|
||||
}()
|
||||
defer s.logger.Debug(ctx, "reportLoop exiting")
|
||||
|
||||
s.L.Lock()
|
||||
defer s.L.Unlock()
|
||||
for {
|
||||
for !s.unreported && !ctxDone {
|
||||
s.Wait()
|
||||
}
|
||||
if ctxDone {
|
||||
return nil
|
||||
}
|
||||
networkStats := *s.networkStats
|
||||
s.unreported = false
|
||||
if err = s.reportLocked(ctx, dest, networkStats); err != nil {
|
||||
return xerrors.Errorf("report stats: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *statsReporter) reportLocked(
|
||||
ctx context.Context, dest statsDest, networkStats map[netlogtype.Connection]netlogtype.Counts,
|
||||
) error {
|
||||
// here we want to do our collecting/reporting while it is unlocked, but then relock
|
||||
// when we return to reportLoop.
|
||||
s.L.Unlock()
|
||||
defer s.L.Lock()
|
||||
stats := s.collector.Collect(ctx, networkStats)
|
||||
resp, err := dest.UpdateStats(ctx, &proto.UpdateStatsRequest{Stats: stats})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
interval := resp.GetReportInterval().AsDuration()
|
||||
if interval != s.lastInterval {
|
||||
s.logger.Info(ctx, "new stats report interval", slog.F("interval", interval))
|
||||
s.lastInterval = interval
|
||||
s.source.SetConnStatsCallback(s.lastInterval, maxConns, s.callback)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,212 +0,0 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
"tailscale.com/types/ipproto"
|
||||
|
||||
"tailscale.com/types/netlogtype"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestStatsReporter(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
||||
fSource := newFakeNetworkStatsSource(ctx, t)
|
||||
fCollector := newFakeCollector(t)
|
||||
fDest := newFakeStatsDest()
|
||||
uut := newStatsReporter(logger, fSource, fCollector)
|
||||
|
||||
loopErr := make(chan error, 1)
|
||||
loopCtx, loopCancel := context.WithCancel(ctx)
|
||||
go func() {
|
||||
err := uut.reportLoop(loopCtx, fDest)
|
||||
loopErr <- err
|
||||
}()
|
||||
|
||||
// initial request to get duration
|
||||
req := testutil.RequireRecvCtx(ctx, t, fDest.reqs)
|
||||
require.NotNil(t, req)
|
||||
require.Nil(t, req.Stats)
|
||||
interval := time.Second * 34
|
||||
testutil.RequireSendCtx(ctx, t, fDest.resps, &proto.UpdateStatsResponse{ReportInterval: durationpb.New(interval)})
|
||||
|
||||
// call to source to set the callback and interval
|
||||
gotInterval := testutil.RequireRecvCtx(ctx, t, fSource.period)
|
||||
require.Equal(t, interval, gotInterval)
|
||||
|
||||
// callback returning netstats
|
||||
netStats := map[netlogtype.Connection]netlogtype.Counts{
|
||||
{
|
||||
Proto: ipproto.TCP,
|
||||
Src: netip.MustParseAddrPort("192.168.1.33:4887"),
|
||||
Dst: netip.MustParseAddrPort("192.168.2.99:9999"),
|
||||
}: {
|
||||
TxPackets: 22,
|
||||
TxBytes: 23,
|
||||
RxPackets: 24,
|
||||
RxBytes: 25,
|
||||
},
|
||||
}
|
||||
fSource.callback(time.Now(), time.Now(), netStats, nil)
|
||||
|
||||
// collector called to complete the stats
|
||||
gotNetStats := testutil.RequireRecvCtx(ctx, t, fCollector.calls)
|
||||
require.Equal(t, netStats, gotNetStats)
|
||||
|
||||
// while we are collecting the stats, send in two new netStats to simulate
|
||||
// what happens if we don't keep up. Only the latest should be kept.
|
||||
netStats0 := map[netlogtype.Connection]netlogtype.Counts{
|
||||
{
|
||||
Proto: ipproto.TCP,
|
||||
Src: netip.MustParseAddrPort("192.168.1.33:4887"),
|
||||
Dst: netip.MustParseAddrPort("192.168.2.99:9999"),
|
||||
}: {
|
||||
TxPackets: 10,
|
||||
TxBytes: 10,
|
||||
RxPackets: 10,
|
||||
RxBytes: 10,
|
||||
},
|
||||
}
|
||||
fSource.callback(time.Now(), time.Now(), netStats0, nil)
|
||||
netStats1 := map[netlogtype.Connection]netlogtype.Counts{
|
||||
{
|
||||
Proto: ipproto.TCP,
|
||||
Src: netip.MustParseAddrPort("192.168.1.33:4887"),
|
||||
Dst: netip.MustParseAddrPort("192.168.2.99:9999"),
|
||||
}: {
|
||||
TxPackets: 11,
|
||||
TxBytes: 11,
|
||||
RxPackets: 11,
|
||||
RxBytes: 11,
|
||||
},
|
||||
}
|
||||
fSource.callback(time.Now(), time.Now(), netStats1, nil)
|
||||
|
||||
// complete first collection
|
||||
stats := &proto.Stats{SessionCountJetbrains: 55}
|
||||
testutil.RequireSendCtx(ctx, t, fCollector.stats, stats)
|
||||
|
||||
// destination called to report the first stats
|
||||
update := testutil.RequireRecvCtx(ctx, t, fDest.reqs)
|
||||
require.NotNil(t, update)
|
||||
require.Equal(t, stats, update.Stats)
|
||||
testutil.RequireSendCtx(ctx, t, fDest.resps, &proto.UpdateStatsResponse{ReportInterval: durationpb.New(interval)})
|
||||
|
||||
// second update -- only netStats1 is reported
|
||||
gotNetStats = testutil.RequireRecvCtx(ctx, t, fCollector.calls)
|
||||
require.Equal(t, netStats1, gotNetStats)
|
||||
stats = &proto.Stats{SessionCountJetbrains: 66}
|
||||
testutil.RequireSendCtx(ctx, t, fCollector.stats, stats)
|
||||
update = testutil.RequireRecvCtx(ctx, t, fDest.reqs)
|
||||
require.NotNil(t, update)
|
||||
require.Equal(t, stats, update.Stats)
|
||||
interval2 := 27 * time.Second
|
||||
testutil.RequireSendCtx(ctx, t, fDest.resps, &proto.UpdateStatsResponse{ReportInterval: durationpb.New(interval2)})
|
||||
|
||||
// set the new interval
|
||||
gotInterval = testutil.RequireRecvCtx(ctx, t, fSource.period)
|
||||
require.Equal(t, interval2, gotInterval)
|
||||
|
||||
loopCancel()
|
||||
err := testutil.RequireRecvCtx(ctx, t, loopErr)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// fakeNetworkStatsSource is a test double for the agent's network statistics
// source. It records the callback registered via SetConnStatsCallback and
// publishes each registered max period on the period channel so the test can
// observe the configured reporting interval.
type fakeNetworkStatsSource struct {
	// Mutex guards callback, which is written by SetConnStatsCallback and
	// read by the test when it invokes the callback directly.
	sync.Mutex
	// ctx bounds how long channel operations may block before the test
	// is failed with a timeout.
	ctx context.Context
	t   testing.TB
	// callback is the dump function most recently registered via
	// SetConnStatsCallback.
	callback func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)
	// period receives the maxPeriod passed to each SetConnStatsCallback
	// call; unbuffered so the test synchronizes with registration.
	period chan time.Duration
}
|
||||
|
||||
func (f *fakeNetworkStatsSource) SetConnStatsCallback(maxPeriod time.Duration, _ int, dump func(start time.Time, end time.Time, virtual map[netlogtype.Connection]netlogtype.Counts, physical map[netlogtype.Connection]netlogtype.Counts)) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.callback = dump
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
f.t.Error("timeout")
|
||||
case f.period <- maxPeriod:
|
||||
// OK
|
||||
}
|
||||
}
|
||||
|
||||
func newFakeNetworkStatsSource(ctx context.Context, t testing.TB) *fakeNetworkStatsSource {
|
||||
f := &fakeNetworkStatsSource{
|
||||
ctx: ctx,
|
||||
t: t,
|
||||
period: make(chan time.Duration),
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// fakeCollector is a test double for the agent stats collector. The argument
// of each Collect call is published on calls, and the stats Collect should
// return are read from stats, letting the test synchronize with and control
// every collection round.
type fakeCollector struct {
	t testing.TB
	// calls receives the network stats passed to each Collect invocation.
	calls chan map[netlogtype.Connection]netlogtype.Counts
	// stats supplies the value Collect returns; unbuffered so the test
	// decides when each collection completes.
	stats chan *proto.Stats
}
|
||||
|
||||
func (f *fakeCollector) Collect(ctx context.Context, networkStats map[netlogtype.Connection]netlogtype.Counts) *proto.Stats {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
f.t.Error("timeout on collect")
|
||||
return nil
|
||||
case f.calls <- networkStats:
|
||||
// ok
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
f.t.Error("timeout on collect")
|
||||
return nil
|
||||
case s := <-f.stats:
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
func newFakeCollector(t testing.TB) *fakeCollector {
|
||||
return &fakeCollector{
|
||||
t: t,
|
||||
calls: make(chan map[netlogtype.Connection]netlogtype.Counts),
|
||||
stats: make(chan *proto.Stats),
|
||||
}
|
||||
}
|
||||
|
||||
// fakeStatsDest is a test double for the stats destination. Each incoming
// UpdateStats request is published on reqs and the response to return is read
// from resps, so the test controls every round trip.
type fakeStatsDest struct {
	// reqs receives the request passed to each UpdateStats call.
	reqs chan *proto.UpdateStatsRequest
	// resps supplies the response UpdateStats returns; unbuffered so the
	// test decides when each call completes.
	resps chan *proto.UpdateStatsResponse
}
|
||||
|
||||
func (f *fakeStatsDest) UpdateStats(ctx context.Context, req *proto.UpdateStatsRequest) (*proto.UpdateStatsResponse, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case f.reqs <- req:
|
||||
// OK
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case resp := <-f.resps:
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newFakeStatsDest() *fakeStatsDest {
|
||||
return &fakeStatsDest{
|
||||
reqs: make(chan *proto.UpdateStatsRequest),
|
||||
resps: make(chan *proto.UpdateStatsResponse),
|
||||
}
|
||||
}
|
||||
@@ -13,10 +13,6 @@ import (
|
||||
func Get(username string) (string, error) {
|
||||
// This command will output "UserShell: /bin/zsh" if successful, we
|
||||
// can ignore the error since we have fallback behavior.
|
||||
if !filepath.IsLocal(username) {
|
||||
return "", xerrors.Errorf("username is nonlocal path: %s", username)
|
||||
}
|
||||
//nolint: gosec // input checked above
|
||||
out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", username), "UserShell").Output()
|
||||
s, ok := strings.CutPrefix(string(out), "UserShell: ")
|
||||
if ok {
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
"github.com/coder/coder/agent/usershell"
|
||||
)
|
||||
|
||||
//nolint:paralleltest,tparallel // This test sets an environment variable.
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
//go:build boringcrypto
|
||||
|
||||
package buildinfo
|
||||
|
||||
import "crypto/boring"
|
||||
|
||||
var boringcrypto = boring.Enabled()
|
||||
+7
-24
@@ -30,15 +30,8 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
// noVersion is the reported version when the version cannot be determined.
|
||||
// Usually because `go build` is run instead of `make build`.
|
||||
noVersion = "v0.0.0"
|
||||
|
||||
// develPreRelease is the pre-release tag for developer versions of the
|
||||
// application. This includes CI builds. The pre-release tag should be appended
|
||||
// to the version with a "-".
|
||||
// Example: v0.0.0-devel
|
||||
develPreRelease = "devel"
|
||||
// develPrefix is prefixed to developer versions of the application.
|
||||
develPrefix = "v0.0.0-devel"
|
||||
)
|
||||
|
||||
// Version returns the semantic version of the build.
|
||||
@@ -52,8 +45,7 @@ func Version() string {
|
||||
if tag == "" {
|
||||
// This occurs when the tag hasn't been injected,
|
||||
// like when using "go run".
|
||||
// <version>-<pre-release>+<revision>
|
||||
version = fmt.Sprintf("%s-%s%s", noVersion, develPreRelease, revision)
|
||||
version = develPrefix + revision
|
||||
return
|
||||
}
|
||||
version = "v" + tag
|
||||
@@ -71,23 +63,18 @@ func Version() string {
|
||||
// disregarded. If it detects that either version is a developer build it
|
||||
// returns true.
|
||||
func VersionsMatch(v1, v2 string) bool {
|
||||
// If no version is attached, then it is a dev build outside of CI. The version
|
||||
// will be disregarded... hopefully they know what they are doing.
|
||||
if strings.Contains(v1, noVersion) || strings.Contains(v2, noVersion) {
|
||||
// Developer versions are disregarded...hopefully they know what they are
|
||||
// doing.
|
||||
if strings.HasPrefix(v1, develPrefix) || strings.HasPrefix(v2, develPrefix) {
|
||||
return true
|
||||
}
|
||||
|
||||
return semver.MajorMinor(v1) == semver.MajorMinor(v2)
|
||||
}
|
||||
|
||||
func IsDevVersion(v string) bool {
|
||||
return strings.Contains(v, "-"+develPreRelease)
|
||||
}
|
||||
|
||||
// IsDev returns true if this is a development build.
|
||||
// CI builds are also considered development builds.
|
||||
func IsDev() bool {
|
||||
return IsDevVersion(Version())
|
||||
return strings.HasPrefix(Version(), develPrefix)
|
||||
}
|
||||
|
||||
// IsSlim returns true if this is a slim build.
|
||||
@@ -100,10 +87,6 @@ func IsAGPL() bool {
|
||||
return strings.Contains(agpl, "t")
|
||||
}
|
||||
|
||||
func IsBoringCrypto() bool {
|
||||
return boringcrypto
|
||||
}
|
||||
|
||||
// ExternalURL returns a URL referencing the current Coder version.
|
||||
// For production builds, this will link directly to a release.
|
||||
// For development builds, this will link to a commit.
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/buildinfo"
|
||||
)
|
||||
|
||||
func TestBuildInfo(t *testing.T) {
|
||||
@@ -57,19 +57,13 @@ func TestBuildInfo(t *testing.T) {
|
||||
expectMatch: true,
|
||||
},
|
||||
// Our CI instance uses a "-devel" prerelease
|
||||
// flag.
|
||||
// flag. This is not the same as a developer WIP build.
|
||||
{
|
||||
name: "DevelPreleaseMajor",
|
||||
name: "DevelPreleaseNotIgnored",
|
||||
v1: "v1.1.1-devel+123abac",
|
||||
v2: "v1.2.3",
|
||||
expectMatch: false,
|
||||
},
|
||||
{
|
||||
name: "DevelPreleaseSame",
|
||||
v1: "v1.1.1-devel+123abac",
|
||||
v2: "v1.1.9",
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
name: "MajorMismatch",
|
||||
v1: "v1.2.3",
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
//go:build !boringcrypto
|
||||
|
||||
package buildinfo
|
||||
|
||||
var boringcrypto = false
|
||||
+16
-34
@@ -8,6 +8,7 @@ import (
|
||||
"net/http/pprof"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@@ -27,13 +28,12 @@ import (
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"cdr.dev/slog/sloggers/slogjson"
|
||||
"cdr.dev/slog/sloggers/slogstackdriver"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/agent/reaper"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/agent"
|
||||
"github.com/coder/coder/agent/reaper"
|
||||
"github.com/coder/coder/buildinfo"
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
@@ -116,7 +116,7 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
defer logWriter.Close()
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
logger.Info(ctx, "spawning reaper process")
|
||||
// Do not start a reaper on the child process. It's important
|
||||
@@ -143,7 +143,7 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
// Note that we don't want to handle these signals in the
|
||||
// process that runs as PID 1, that's why we do this after
|
||||
// the reaper forked.
|
||||
ctx, stopNotify := inv.SignalNotifyContext(ctx, InterruptSignals...)
|
||||
ctx, stopNotify := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
@@ -153,14 +153,13 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
logWriter := &lumberjackWriteCloseFixer{w: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent.log"),
|
||||
MaxSize: 5, // MB
|
||||
// Per customer incident on November 17th, 2023, its helpful
|
||||
// to have the log of the last few restarts to debug a failing agent.
|
||||
MaxBackups: 10,
|
||||
// Without this, rotated logs will never be deleted.
|
||||
MaxBackups: 1,
|
||||
}}
|
||||
defer logWriter.Close()
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
version := buildinfo.Version()
|
||||
logger.Info(ctx, "agent is starting now",
|
||||
@@ -199,19 +198,9 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
var exchangeToken func(context.Context) (agentsdk.AuthenticateResponse, error)
|
||||
switch auth {
|
||||
case "token":
|
||||
token, _ := inv.ParsedFlags().GetString(varAgentToken)
|
||||
if token == "" {
|
||||
tokenFile, _ := inv.ParsedFlags().GetString(varAgentTokenFile)
|
||||
if tokenFile != "" {
|
||||
tokenBytes, err := os.ReadFile(tokenFile)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read token file %q: %w", tokenFile, err)
|
||||
}
|
||||
token = strings.TrimSpace(string(tokenBytes))
|
||||
}
|
||||
}
|
||||
if token == "" {
|
||||
return xerrors.Errorf("CODER_AGENT_TOKEN or CODER_AGENT_TOKEN_FILE must be set for token auth")
|
||||
token, err := inv.ParsedFlags().GetString(varAgentToken)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("CODER_AGENT_TOKEN must be set for token auth: %w", err)
|
||||
}
|
||||
client.SetSessionToken(token)
|
||||
case "google-instance-identity":
|
||||
@@ -278,8 +267,6 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
subsystems = append(subsystems, subsystem)
|
||||
}
|
||||
|
||||
procTicker := time.NewTicker(time.Second)
|
||||
defer procTicker.Stop()
|
||||
agnt := agent.New(agent.Options{
|
||||
Client: client,
|
||||
Logger: logger,
|
||||
@@ -297,18 +284,13 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
return resp.SessionToken, nil
|
||||
},
|
||||
EnvironmentVariables: map[string]string{
|
||||
"GIT_ASKPASS": executablePath,
|
||||
agent.EnvProcPrioMgmt: os.Getenv(agent.EnvProcPrioMgmt),
|
||||
"GIT_ASKPASS": executablePath,
|
||||
},
|
||||
IgnorePorts: ignorePorts,
|
||||
SSHMaxTimeout: sshMaxTimeout,
|
||||
Subsystems: subsystems,
|
||||
|
||||
PrometheusRegistry: prometheusRegistry,
|
||||
Syscaller: agentproc.NewSyscaller(),
|
||||
// Intentionally set this to nil. It's mainly used
|
||||
// for testing.
|
||||
ModifiedProcesses: nil,
|
||||
})
|
||||
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, prometheusMetricsHandler(prometheusRegistry, logger), prometheusAddress, "prometheus")
|
||||
|
||||
+132
-103
@@ -13,14 +13,13 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisionersdk/proto"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/coder/agent"
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/provisionersdk/proto"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func TestWorkspaceAgent(t *testing.T) {
|
||||
@@ -29,63 +28,83 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Run("LogDirectory", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
authToken := uuid.NewString()
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithAgent().
|
||||
Do()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", r.AgentToken,
|
||||
"--agent-token", authToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
)
|
||||
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
ctx := inv.Context()
|
||||
pty.ExpectMatchContext(ctx, "agent is starting now")
|
||||
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
info, err := os.Stat(filepath.Join(logDir, "coder-agent.log"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.Size() > 0
|
||||
}, testutil.WaitLong, testutil.IntervalMedium)
|
||||
info, err := os.Stat(filepath.Join(logDir, "coder-agent.log"))
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, info.Size(), int64(0))
|
||||
})
|
||||
|
||||
t.Run("Azure", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
certificates, metadataClient := coderdtest.NewAzureInstanceIdentity(t, instanceID)
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AzureCertificates: certificates,
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
AzureCertificates: certificates,
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Provision_Response{{
|
||||
Type: &proto.Provision_Response_Complete{
|
||||
Complete: &proto.Provision_Complete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
inv, _ := clitest.New(t, "agent", "--auth", "azure-instance-identity", "--agent-url", client.URL.String())
|
||||
inv = inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(inv.Context(), "azure-client", metadataClient),
|
||||
)
|
||||
|
||||
ctx := inv.Context()
|
||||
clitest.Start(t, inv)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
|
||||
MatchResources(matchAgentWithVersion).Wait()
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, workspace.LatestBuild.Resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -101,30 +120,43 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
certificates, metadataClient := coderdtest.NewAWSInstanceIdentity(t, instanceID)
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AWSCertificates: certificates,
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
AWSCertificates: certificates,
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Provision_Response{{
|
||||
Type: &proto.Provision_Response_Complete{
|
||||
Complete: &proto.Provision_Complete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
inv, _ := clitest.New(t, "agent", "--auth", "aws-instance-identity", "--agent-url", client.URL.String())
|
||||
inv = inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(inv.Context(), "aws-client", metadataClient),
|
||||
)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
ctx := inv.Context()
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
|
||||
MatchResources(matchAgentWithVersion).
|
||||
Wait()
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -140,22 +172,37 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
validator, metadataClient := coderdtest.NewGoogleInstanceIdentity(t, instanceID, false)
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
GoogleTokenValidator: validator,
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
GoogleTokenValidator: validator,
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Provision_Response{{
|
||||
Type: &proto.Provision_Response_Complete{
|
||||
Complete: &proto.Provision_Complete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
inv, cfg := clitest.New(t, "agent", "--auth", "google-instance-identity", "--agent-url", client.URL.String())
|
||||
clitest.SetupConfig(t, member, cfg)
|
||||
|
||||
ptytest.New(t).Attach(inv)
|
||||
clitest.SetupConfig(t, client, cfg)
|
||||
clitest.Start(t,
|
||||
inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
@@ -164,10 +211,9 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
)
|
||||
|
||||
ctx := inv.Context()
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
|
||||
MatchResources(matchAgentWithVersion).
|
||||
Wait()
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -197,28 +243,37 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Run("PostStartup", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
authToken := uuid.NewString()
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent().Do()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", r.AgentToken,
|
||||
"--agent-token", authToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
)
|
||||
// Set the subsystems for the agent.
|
||||
inv.Environ.Set(agent.EnvAgentSubsystem, fmt.Sprintf("%s,%s", codersdk.AgentSubsystemExectrace, codersdk.AgentSubsystemEnvbox))
|
||||
|
||||
clitest.Start(t, inv)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).
|
||||
MatchResources(matchAgentWithSubsystems).Wait()
|
||||
clitest.Start(t, inv)
|
||||
pty.ExpectMatchContext(inv.Context(), "agent is starting now")
|
||||
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
require.Len(t, resources, 1)
|
||||
require.Len(t, resources[0].Agents, 1)
|
||||
require.Len(t, resources[0].Agents[0].Subsystems, 2)
|
||||
@@ -227,29 +282,3 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
require.Equal(t, codersdk.AgentSubsystemExectrace, resources[0].Agents[0].Subsystems[1])
|
||||
})
|
||||
}
|
||||
|
||||
func matchAgentWithVersion(rs []codersdk.WorkspaceResource) bool {
|
||||
if len(rs) < 1 {
|
||||
return false
|
||||
}
|
||||
if len(rs[0].Agents) < 1 {
|
||||
return false
|
||||
}
|
||||
if rs[0].Agents[0].Version == "" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func matchAgentWithSubsystems(rs []codersdk.WorkspaceResource) bool {
|
||||
if len(rs) < 1 {
|
||||
return false
|
||||
}
|
||||
if len(rs[0].Agents) < 1 {
|
||||
return false
|
||||
}
|
||||
if len(rs[0].Agents[0].Subsystems) < 1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func (r *RootCmd) autoupdate() *clibase.Cmd {
|
||||
client := new(codersdk.Client)
|
||||
cmd := &clibase.Cmd{
|
||||
Annotations: workspaceCommand,
|
||||
Use: "autoupdate <workspace> <always|never>",
|
||||
Short: "Toggle auto-update policy for a workspace",
|
||||
Middleware: clibase.Chain(
|
||||
clibase.RequireNArgs(2),
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
policy := strings.ToLower(inv.Args[1])
|
||||
err := validateAutoUpdatePolicy(policy)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("validate policy: %w", err)
|
||||
}
|
||||
|
||||
workspace, err := namedWorkspace(inv.Context(), client, inv.Args[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get workspace: %w", err)
|
||||
}
|
||||
|
||||
err = client.UpdateWorkspaceAutomaticUpdates(inv.Context(), workspace.ID, codersdk.UpdateWorkspaceAutomaticUpdatesRequest{
|
||||
AutomaticUpdates: codersdk.AutomaticUpdates(policy),
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("update workspace automatic updates policy: %w", err)
|
||||
}
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Updated workspace %q auto-update policy to %q\n", workspace.Name, policy)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = append(cmd.Options, cliui.SkipPromptOption())
|
||||
return cmd
|
||||
}
|
||||
|
||||
func validateAutoUpdatePolicy(arg string) error {
|
||||
switch codersdk.AutomaticUpdates(arg) {
|
||||
case codersdk.AutomaticUpdatesAlways, codersdk.AutomaticUpdatesNever:
|
||||
return nil
|
||||
default:
|
||||
return xerrors.Errorf("invalid option %q must be either of %q or %q", arg, codersdk.AutomaticUpdatesAlways, codersdk.AutomaticUpdatesNever)
|
||||
}
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func TestAutoUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
require.Equal(t, codersdk.AutomaticUpdatesNever, workspace.AutomaticUpdates)
|
||||
|
||||
expectedPolicy := codersdk.AutomaticUpdatesAlways
|
||||
inv, root := clitest.New(t, "autoupdate", workspace.Name, string(expectedPolicy))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
var buf bytes.Buffer
|
||||
inv.Stdout = &buf
|
||||
err := inv.Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, buf.String(), fmt.Sprintf("Updated workspace %q auto-update policy to %q", workspace.Name, expectedPolicy))
|
||||
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, expectedPolicy, workspace.AutomaticUpdates)
|
||||
})
|
||||
|
||||
t.Run("InvalidArgs", func(t *testing.T) {
|
||||
type testcase struct {
|
||||
Name string
|
||||
Args []string
|
||||
ErrorContains string
|
||||
}
|
||||
|
||||
cases := []testcase{
|
||||
{
|
||||
Name: "NoPolicy",
|
||||
Args: []string{"autoupdate", "ws"},
|
||||
ErrorContains: "wanted 2 args but got 1",
|
||||
},
|
||||
{
|
||||
Name: "InvalidPolicy",
|
||||
Args: []string{"autoupdate", "ws", "sometimes"},
|
||||
ErrorContains: `invalid option "sometimes" must be either of`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
c := c
|
||||
t.Run(c.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, c.Args...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
err := inv.Run()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), c.ErrorContains)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
+1
-41
@@ -7,19 +7,15 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/coderd/util/slice"
|
||||
)
|
||||
|
||||
// Cmd describes an executable command.
|
||||
@@ -172,7 +168,6 @@ func (c *Cmd) Invoke(args ...string) *Invocation {
|
||||
Stdout: io.Discard,
|
||||
Stderr: io.Discard,
|
||||
Stdin: strings.NewReader(""),
|
||||
Logger: slog.Make(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,11 +183,6 @@ type Invocation struct {
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
Stdin io.Reader
|
||||
Logger slog.Logger
|
||||
Net Net
|
||||
|
||||
// testing
|
||||
signalNotifyContext func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)
|
||||
}
|
||||
|
||||
// WithOS returns the invocation as a main package, filling in the invocation's unset
|
||||
@@ -204,36 +194,6 @@ func (inv *Invocation) WithOS() *Invocation {
|
||||
i.Stdin = os.Stdin
|
||||
i.Args = os.Args[1:]
|
||||
i.Environ = ParseEnviron(os.Environ(), "")
|
||||
i.Net = osNet{}
|
||||
})
|
||||
}
|
||||
|
||||
// WithTestSignalNotifyContext allows overriding the default implementation of SignalNotifyContext.
|
||||
// This should only be used in testing.
|
||||
func (inv *Invocation) WithTestSignalNotifyContext(
|
||||
_ testing.TB, // ensure we only call this from tests
|
||||
f func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc),
|
||||
) *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.signalNotifyContext = f
|
||||
})
|
||||
}
|
||||
|
||||
// SignalNotifyContext is equivalent to signal.NotifyContext, but supports being overridden in
|
||||
// tests.
|
||||
func (inv *Invocation) SignalNotifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
|
||||
if inv.signalNotifyContext == nil {
|
||||
return signal.NotifyContext(parent, signals...)
|
||||
}
|
||||
return inv.signalNotifyContext(parent, signals...)
|
||||
}
|
||||
|
||||
func (inv *Invocation) WithTestParsedFlags(
|
||||
_ testing.TB, // ensure we only call this from tests
|
||||
parsedFlags *pflag.FlagSet,
|
||||
) *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.parsedFlags = parsedFlags
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
)
|
||||
|
||||
// ioBufs is the standard input, output, and error for a command.
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
)
|
||||
|
||||
func TestFilterNamePrefix(t *testing.T) {
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
package clibase
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/pion/udp"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Net abstracts CLI commands interacting with the operating system networking.
|
||||
//
|
||||
// At present, it covers opening local listening sockets, since doing this
|
||||
// in testing is a challenge without flakes, since it's hard to pick a port we
|
||||
// know a priori will be free.
|
||||
type Net interface {
|
||||
// Listen has the same semantics as `net.Listen` but also supports `udp`
|
||||
Listen(network, address string) (net.Listener, error)
|
||||
}
|
||||
|
||||
// osNet is an implementation that call the real OS for networking.
|
||||
type osNet struct{}
|
||||
|
||||
func (osNet) Listen(network, address string) (net.Listener, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
|
||||
return net.Listen(network, address)
|
||||
case "udp":
|
||||
host, port, err := net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("split %q: %w", address, err)
|
||||
}
|
||||
|
||||
var portInt int
|
||||
portInt, err = strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse port %v from %q as int: %w", port, address, err)
|
||||
}
|
||||
|
||||
// Use pion here so that we get a stream-style net.Conn listener, instead
|
||||
// of a packet-oriented connection that can read and write to multiple
|
||||
// addresses.
|
||||
return udp.Listen(network, &net.UDPAddr{
|
||||
IP: net.ParseIP(host),
|
||||
Port: portInt,
|
||||
})
|
||||
default:
|
||||
return nil, xerrors.Errorf("unknown listen network %q", network)
|
||||
}
|
||||
}
|
||||
+18
-129
@@ -1,8 +1,6 @@
|
||||
package clibase
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@@ -67,20 +65,6 @@ type Option struct {
|
||||
ValueSource ValueSource `json:"value_source,omitempty"`
|
||||
}
|
||||
|
||||
// optionNoMethods is just a wrapper around Option so we can defer to the
|
||||
// default json.Unmarshaler behavior.
|
||||
type optionNoMethods Option
|
||||
|
||||
func (o *Option) UnmarshalJSON(data []byte) error {
|
||||
// If an option has no values, we have no idea how to unmarshal it.
|
||||
// So just discard the json data.
|
||||
if o.Value == nil {
|
||||
o.Value = &DiscardValue
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, (*optionNoMethods)(o))
|
||||
}
|
||||
|
||||
func (o Option) YAMLPath() string {
|
||||
if o.YAML == "" {
|
||||
return ""
|
||||
@@ -95,101 +79,15 @@ func (o Option) YAMLPath() string {
|
||||
// OptionSet is a group of options that can be applied to a command.
|
||||
type OptionSet []Option
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler for OptionSets. Options have an
|
||||
// interface Value type that cannot handle unmarshalling because the types cannot
|
||||
// be inferred. Since it is a slice, instantiating the Options first does not
|
||||
// help.
|
||||
//
|
||||
// However, we typically do instantiate the slice to have the correct types.
|
||||
// So this unmarshaller will attempt to find the named option in the existing
|
||||
// set, if it cannot, the value is discarded. If the option exists, the value
|
||||
// is unmarshalled into the existing option, and replaces the existing option.
|
||||
//
|
||||
// The value is discarded if it's type cannot be inferred. This behavior just
|
||||
// feels "safer", although it should never happen if the correct option set
|
||||
// is passed in. The situation where this could occur is if a client and server
|
||||
// are on different versions with different options.
|
||||
func (optSet *OptionSet) UnmarshalJSON(data []byte) error {
|
||||
dec := json.NewDecoder(bytes.NewBuffer(data))
|
||||
// Should be a json array, so consume the starting open bracket.
|
||||
t, err := dec.Token()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read array open bracket: %w", err)
|
||||
}
|
||||
if t != json.Delim('[') {
|
||||
return xerrors.Errorf("expected array open bracket, got %q", t)
|
||||
}
|
||||
|
||||
// As long as json elements exist, consume them. The counter is used for
|
||||
// better errors.
|
||||
var i int
|
||||
OptionSetDecodeLoop:
|
||||
for dec.More() {
|
||||
var opt Option
|
||||
// jValue is a placeholder value that allows us to capture the
|
||||
// raw json for the value to attempt to unmarshal later.
|
||||
var jValue jsonValue
|
||||
opt.Value = &jValue
|
||||
err := dec.Decode(&opt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decode %d option: %w", i, err)
|
||||
}
|
||||
// This counter is used to contextualize errors to show which element of
|
||||
// the array we failed to decode. It is only used in the error above, as
|
||||
// if the above works, we can instead use the Option.Name which is more
|
||||
// descriptive and useful. So increment here for the next decode.
|
||||
i++
|
||||
|
||||
// Try to see if the option already exists in the option set.
|
||||
// If it does, just update the existing option.
|
||||
for optIndex, have := range *optSet {
|
||||
if have.Name == opt.Name {
|
||||
if jValue != nil {
|
||||
err := json.Unmarshal(jValue, &(*optSet)[optIndex].Value)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decode option %q value: %w", have.Name, err)
|
||||
}
|
||||
// Set the opt's value
|
||||
opt.Value = (*optSet)[optIndex].Value
|
||||
} else {
|
||||
// Hopefully the user passed empty values in the option set. There is no easy way
|
||||
// to tell, and if we do not do this, it breaks json.Marshal if we do it again on
|
||||
// this new option set.
|
||||
opt.Value = (*optSet)[optIndex].Value
|
||||
}
|
||||
// Override the existing.
|
||||
(*optSet)[optIndex] = opt
|
||||
// Go to the next option to decode.
|
||||
continue OptionSetDecodeLoop
|
||||
}
|
||||
}
|
||||
|
||||
// If the option doesn't exist, the value will be discarded.
|
||||
// We do this because we cannot infer the type of the value.
|
||||
opt.Value = DiscardValue
|
||||
*optSet = append(*optSet, opt)
|
||||
}
|
||||
|
||||
t, err = dec.Token()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read array close bracket: %w", err)
|
||||
}
|
||||
if t != json.Delim(']') {
|
||||
return xerrors.Errorf("expected array close bracket, got %q", t)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add adds the given Options to the OptionSet.
|
||||
func (optSet *OptionSet) Add(opts ...Option) {
|
||||
*optSet = append(*optSet, opts...)
|
||||
func (s *OptionSet) Add(opts ...Option) {
|
||||
*s = append(*s, opts...)
|
||||
}
|
||||
|
||||
// Filter will only return options that match the given filter. (return true)
|
||||
func (optSet OptionSet) Filter(filter func(opt Option) bool) OptionSet {
|
||||
func (s OptionSet) Filter(filter func(opt Option) bool) OptionSet {
|
||||
cpy := make(OptionSet, 0)
|
||||
for _, opt := range optSet {
|
||||
for _, opt := range s {
|
||||
if filter(opt) {
|
||||
cpy = append(cpy, opt)
|
||||
}
|
||||
@@ -198,13 +96,13 @@ func (optSet OptionSet) Filter(filter func(opt Option) bool) OptionSet {
|
||||
}
|
||||
|
||||
// FlagSet returns a pflag.FlagSet for the OptionSet.
|
||||
func (optSet *OptionSet) FlagSet() *pflag.FlagSet {
|
||||
if optSet == nil {
|
||||
func (s *OptionSet) FlagSet() *pflag.FlagSet {
|
||||
if s == nil {
|
||||
return &pflag.FlagSet{}
|
||||
}
|
||||
|
||||
fs := pflag.NewFlagSet("", pflag.ContinueOnError)
|
||||
for _, opt := range *optSet {
|
||||
for _, opt := range *s {
|
||||
if opt.Flag == "" {
|
||||
continue
|
||||
}
|
||||
@@ -241,8 +139,8 @@ func (optSet *OptionSet) FlagSet() *pflag.FlagSet {
|
||||
|
||||
// ParseEnv parses the given environment variables into the OptionSet.
|
||||
// Use EnvsWithPrefix to filter out prefixes.
|
||||
func (optSet *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
if optSet == nil {
|
||||
func (s *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -256,21 +154,12 @@ func (optSet *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
envs[v.Name] = v.Value
|
||||
}
|
||||
|
||||
for i, opt := range *optSet {
|
||||
for i, opt := range *s {
|
||||
if opt.Env == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
envVal, ok := envs[opt.Env]
|
||||
if !ok {
|
||||
// Homebrew strips all environment variables that do not start with `HOMEBREW_`.
|
||||
// This prevented using brew to invoke the Coder agent, because the environment
|
||||
// variables to not get passed down.
|
||||
//
|
||||
// A customer wanted to use their custom tap inside a workspace, which was failing
|
||||
// because the agent lacked the environment variables to authenticate with Git.
|
||||
envVal, ok = envs[`HOMEBREW_`+opt.Env]
|
||||
}
|
||||
// Currently, empty values are treated as if the environment variable is
|
||||
// unset. This behavior is technically not correct as there is now no
|
||||
// way for a user to change a Default value to an empty string from
|
||||
@@ -283,7 +172,7 @@ func (optSet *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
continue
|
||||
}
|
||||
|
||||
(*optSet)[i].ValueSource = ValueSourceEnv
|
||||
(*s)[i].ValueSource = ValueSourceEnv
|
||||
if err := opt.Value.Set(envVal); err != nil {
|
||||
merr = multierror.Append(
|
||||
merr, xerrors.Errorf("parse %q: %w", opt.Name, err),
|
||||
@@ -296,14 +185,14 @@ func (optSet *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
|
||||
// SetDefaults sets the default values for each Option, skipping values
|
||||
// that already have a value source.
|
||||
func (optSet *OptionSet) SetDefaults() error {
|
||||
if optSet == nil {
|
||||
func (s *OptionSet) SetDefaults() error {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var merr *multierror.Error
|
||||
|
||||
for i, opt := range *optSet {
|
||||
for i, opt := range *s {
|
||||
// Skip values that may have already been set by the user.
|
||||
if opt.ValueSource != ValueSourceNone {
|
||||
continue
|
||||
@@ -323,7 +212,7 @@ func (optSet *OptionSet) SetDefaults() error {
|
||||
)
|
||||
continue
|
||||
}
|
||||
(*optSet)[i].ValueSource = ValueSourceDefault
|
||||
(*s)[i].ValueSource = ValueSourceDefault
|
||||
if err := opt.Value.Set(opt.Default); err != nil {
|
||||
merr = multierror.Append(
|
||||
merr, xerrors.Errorf("parse %q: %w", opt.Name, err),
|
||||
@@ -335,9 +224,9 @@ func (optSet *OptionSet) SetDefaults() error {
|
||||
|
||||
// ByName returns the Option with the given name, or nil if no such option
|
||||
// exists.
|
||||
func (optSet *OptionSet) ByName(name string) *Option {
|
||||
for i := range *optSet {
|
||||
opt := &(*optSet)[i]
|
||||
func (s *OptionSet) ByName(name string) *Option {
|
||||
for i := range *s {
|
||||
opt := &(*s)[i]
|
||||
if opt.Name == name {
|
||||
return opt
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user