Compare commits
193 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 6285d65b6a | |||
| 611ca55458 | |||
| 34d902ebf1 | |||
| dc9b4155e0 | |||
| f4c5020f63 | |||
| b9b9c2fb9f | |||
| ccabec6dd1 | |||
| 23f61fce2a | |||
| 98a6958f10 | |||
| 6a00baf235 | |||
| c8f8c95f6a | |||
| 623fc5baac | |||
| ca3811499e | |||
| 14a9576b77 | |||
| 94e96fa40b | |||
| 8a446837d4 | |||
| 7a77e55bd4 | |||
| b412cc1a4b | |||
| 78a24941fe | |||
| a21a6d2f4a | |||
| 4de1fc8339 | |||
| a05fad4efd | |||
| 6e496077ae | |||
| cf0d2c9bbc | |||
| e6b6b7f610 | |||
| 0b53b06fc6 | |||
| 076c4a0aa8 | |||
| 9e35793b43 | |||
| 254e91a08f | |||
| 5d7c4092ac | |||
| c9bce19d88 | |||
| da54874958 | |||
| 57c202d112 | |||
| 4e3b212707 | |||
| 4f8270d95b | |||
| 1400d7cd84 | |||
| ca3c0490e0 | |||
| 123fe0131e | |||
| 09142255e6 | |||
| 706bceb7e7 | |||
| eba753ba87 | |||
| 343d1184b2 | |||
| 7a71180ae6 | |||
| 253e6cbffa | |||
| 184f0625e1 | |||
| 6dacf70898 | |||
| b9dd566804 | |||
| e44f7adb7e | |||
| 9c0cd5287c | |||
| 5025fe2fa0 | |||
| 49de44c76d | |||
| f7ccfa2ab9 | |||
| 8343a4f199 | |||
| a7b49788f5 | |||
| a07ca946c3 | |||
| 8ca3fa9712 | |||
| b101a6f3f4 | |||
| 85acfdf0dc | |||
| 2ee6acb2ad | |||
| 6fde537f9c | |||
| 5e36be8cbb | |||
| 58d29264aa | |||
| 369a9fb535 | |||
| 68e17921f0 | |||
| b0fe9bcdd1 | |||
| d37fb054c8 | |||
| 54b8e794ce | |||
| a4c90c591d | |||
| 690e6c6585 | |||
| 91bfcca287 | |||
| c14a4b92ed | |||
| e938e8577f | |||
| 985eea6099 | |||
| c417115eb1 | |||
| 544bf01fbb | |||
| 80f042f01b | |||
| 57f3410009 | |||
| 3fdae47b87 | |||
| 4ba3573632 | |||
| f6b0835982 | |||
| 04c5f924d7 | |||
| 7599ad4bf6 | |||
| aabb72783c | |||
| 55890df6f1 | |||
| 3610402cd8 | |||
| c43297937b | |||
| f1423450bd | |||
| 6a0f8ae9cc | |||
| 380022fe63 | |||
| c3eea98db0 | |||
| 53d1fb36db | |||
| d6351a6b9f | |||
| 546157b63e | |||
| 4b646cc4fa | |||
| acd0cd66f6 | |||
| 5c898d0c83 | |||
| c3f946737c | |||
| 000e1a5ef2 | |||
| a872330a8d | |||
| b1b2d1b2b2 | |||
| 5817c6ac7f | |||
| 4be61d9250 | |||
| 4b6a82f92a | |||
| 01dd35f1ba | |||
| 2306d2c709 | |||
| e749070193 | |||
| 301727d1fc | |||
| 8cf82112ad | |||
| 40e68cb80b | |||
| c41261cf6e | |||
| 351d55e1f4 | |||
| 3b951f77fb | |||
| 0a46b1e59d | |||
| 010f64e8e9 | |||
| 0e8c68ebc5 | |||
| c3fcf7c953 | |||
| b3d3b8ba0f | |||
| 16c12e976e | |||
| ca342067b3 | |||
| d7b96f7d58 | |||
| 923c212960 | |||
| 3ae42f4de9 | |||
| 4a17e0d91f | |||
| 604f211674 | |||
| 6122df6f1f | |||
| 4e6645af50 | |||
| 426b30ed16 | |||
| 272962cfae | |||
| 5d40b1f0f4 | |||
| cee0d1f848 | |||
| 95f26f74b6 | |||
| d6d9cf9b30 | |||
| fd73d6dd0d | |||
| 758eb21b36 | |||
| f28cd15706 | |||
| 3ceee76784 | |||
| c73f708678 | |||
| 815bf1b668 | |||
| 88c9f31007 | |||
| fd59e2e812 | |||
| db665e7261 | |||
| ccf6f4e7ed | |||
| 690ba661a7 | |||
| 53400c6205 | |||
| e1da2b6467 | |||
| c0cc8b9935 | |||
| f62e1ede77 | |||
| 7bdb8ff9cf | |||
| e62677efab | |||
| 049e7cb5df | |||
| a848e71f58 | |||
| 42bac09c1a | |||
| d275e52a41 | |||
| eb7d947d10 | |||
| 9c12b4ed8e | |||
| 3279504cbe | |||
| 13a2014d7f | |||
| 8d4b6086f6 | |||
| 44a826dc06 | |||
| 1fb274cbda | |||
| b10a1b84e5 | |||
| f14efd1a2b | |||
| 854bb5dbeb | |||
| e7bc01383c | |||
| 01fe5e668e | |||
| 46d64c624a | |||
| fb9fca8bc9 | |||
| ad20b23178 | |||
| 303b280e0e | |||
| 075454cce8 | |||
| 9f54fa8e52 | |||
| fd4e2cc331 | |||
| 8a4438895b | |||
| b6774ead2c | |||
| 7e1caa7086 | |||
| 69664ed168 | |||
| 420fae886a | |||
| 6e426cf47d | |||
| 9a023dd63b | |||
| 1d6283bdac | |||
| 8f338782db | |||
| 81e292be44 | |||
| 8bcf23e60a | |||
| 83c63d4a63 | |||
| 5ae19f097e | |||
| bd785ddd87 | |||
| c1885dab27 | |||
| 8a2811210a | |||
| 877519232c | |||
| 66a5b0f7bc | |||
| 8f3727d05d | |||
| 80223a5e41 | |||
| 56ee105a2a |
+4
-1
@@ -9,6 +9,9 @@ github_checks:
|
||||
annotations: false
|
||||
|
||||
coverage:
|
||||
range: 50..75
|
||||
round: down
|
||||
precision: 2
|
||||
status:
|
||||
patch:
|
||||
default:
|
||||
@@ -16,7 +19,7 @@ coverage:
|
||||
project:
|
||||
default:
|
||||
target: 65%
|
||||
informational: yes
|
||||
informational: true
|
||||
|
||||
ignore:
|
||||
# This is generated code.
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
- name: typos-action
|
||||
uses: crate-ci/typos@v1.0.4
|
||||
uses: crate-ci/typos@master
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
- name: Fix Helper
|
||||
@@ -52,6 +52,7 @@ jobs:
|
||||
docs-only: ${{ steps.filter.outputs.docs_count == steps.filter.outputs.all_count }}
|
||||
sh: ${{ steps.filter.outputs.sh }}
|
||||
ts: ${{ steps.filter.outputs.ts }}
|
||||
k8s: ${{ steps.filter.outputs.k8s }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
# For pull requests it's not necessary to checkout the code
|
||||
@@ -68,10 +69,11 @@ jobs:
|
||||
sh:
|
||||
- "**.sh"
|
||||
ts:
|
||||
- "**.tsx?"
|
||||
- "**.jsx?"
|
||||
- "**.lock"
|
||||
- "**.json"
|
||||
- 'site/**'
|
||||
k8s:
|
||||
- 'helm/**'
|
||||
- Dockerfile
|
||||
- scripts/helm.sh
|
||||
- id: debug
|
||||
run: |
|
||||
echo "${{ toJSON(steps.filter )}}"
|
||||
@@ -94,11 +96,20 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3.2.0
|
||||
with:
|
||||
version: v1.46.0
|
||||
version: v1.48.0
|
||||
|
||||
check-enterprise-imports:
|
||||
name: check/enterprise-imports
|
||||
timeout-minutes: 5
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Check imports of enterprise code
|
||||
run: ./scripts/check_enterprise_imports.sh
|
||||
|
||||
style-lint-shellcheck:
|
||||
name: style/lint/shellcheck
|
||||
@@ -139,6 +150,26 @@ jobs:
|
||||
run: yarn lint
|
||||
working-directory: site
|
||||
|
||||
style-lint-k8s:
|
||||
name: "style/lint/k8s"
|
||||
timeout-minutes: 5
|
||||
needs: changes
|
||||
if: needs.changes.outputs.k8s == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v3
|
||||
with:
|
||||
version: v3.9.2
|
||||
|
||||
- name: cd helm && make lint
|
||||
run: |
|
||||
cd helm
|
||||
make lint
|
||||
|
||||
gen:
|
||||
name: "style/gen"
|
||||
timeout-minutes: 8
|
||||
@@ -168,7 +199,7 @@ jobs:
|
||||
version: "3.20.0"
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
@@ -188,14 +219,21 @@ jobs:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-mod }}
|
||||
key: ${{ github.job }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- run: |
|
||||
- name: Install sqlc
|
||||
run: |
|
||||
curl -sSL https://github.com/kyleconroy/sqlc/releases/download/v1.13.0/sqlc_1.13.0_linux_amd64.tar.gz | sudo tar -C /usr/bin -xz sqlc
|
||||
- name: Install protoc-gen-go
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
- name: Install protoc-gen-go-drpc
|
||||
run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.26
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
|
||||
- run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
- run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.26
|
||||
- run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
- run: "make --output-sync -j -B gen"
|
||||
- run: ./scripts/check_unstaged.sh
|
||||
- name: make gen
|
||||
run: "make --output-sync -j -B gen"
|
||||
|
||||
- name: Check for unstaged files
|
||||
run: ./scripts/check_unstaged.sh
|
||||
|
||||
style-fmt:
|
||||
name: "style/fmt"
|
||||
@@ -225,7 +263,8 @@ jobs:
|
||||
- name: Install shfmt
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.5.0
|
||||
|
||||
- run: |
|
||||
- name: make fmt
|
||||
run: |
|
||||
export PATH=${PATH}:$(go env GOPATH)/bin
|
||||
make --output-sync -j -B fmt
|
||||
|
||||
@@ -244,7 +283,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
@@ -331,7 +370,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
@@ -414,7 +453,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
@@ -519,7 +558,7 @@ jobs:
|
||||
# Go is required for uploading the test results to datadog
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
@@ -577,7 +616,7 @@ jobs:
|
||||
# Go is required for uploading the test results to datadog
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.18"
|
||||
go-version: "~1.19"
|
||||
|
||||
- uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
@@ -622,6 +661,14 @@ jobs:
|
||||
DEBUG: pw:api
|
||||
working-directory: site
|
||||
|
||||
- name: Upload Playwright Failed Tests
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: failed-test-videos
|
||||
path: ./site/test-results/**/*.webm
|
||||
retention:days: 7
|
||||
|
||||
- name: Upload DataDog Trace
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
env:
|
||||
|
||||
@@ -0,0 +1,51 @@
|
||||
name: dogfood
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- "*"
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
pull_request:
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Get branch name
|
||||
id: branch-name
|
||||
uses: tj-actions/branch-names@v5.4
|
||||
|
||||
- name: "Branch name to Docker tag name"
|
||||
id: docker-tag-name
|
||||
run: |
|
||||
tag=${{ steps.branch-name.outputs.current_branch }}
|
||||
# Replace / with --, e.g. user/feature => user--feature.
|
||||
tag=${tag//\//--}
|
||||
echo "::set-output name=tag::${tag}"
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: "{{defaultContext}}:dogfood"
|
||||
push: true
|
||||
tags: "codercom/oss-dogfood:${{ steps.docker-tag-name.outputs.tag }},codercom/oss-dogfood:latest"
|
||||
cache-from: type=registry,ref=codercom/oss-dogfood:latest
|
||||
cache-to: type=inline
|
||||
@@ -102,7 +102,7 @@ jobs:
|
||||
|
||||
# build and (maybe) push Docker images for each architecture
|
||||
images=()
|
||||
for arch in amd64; do
|
||||
for arch in amd64 armv7 arm64; do
|
||||
img="$(
|
||||
./scripts/build_docker.sh \
|
||||
${{ (!github.event.inputs.dry_run && !github.event.inputs.snapshot) && '--push' || '' }} \
|
||||
@@ -187,10 +187,10 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# The version of bash that MacOS ships with is too old
|
||||
# The version of bash that macOS ships with is too old
|
||||
brew install bash
|
||||
|
||||
# The version of make that MacOS ships with is too old
|
||||
# The version of make that macOS ships with is too old
|
||||
brew install make
|
||||
echo "$(brew --prefix)/opt/make/libexec/gnubin" >> $GITHUB_PATH
|
||||
|
||||
|
||||
@@ -31,6 +31,5 @@ jobs:
|
||||
isn't more activity.
|
||||
# Upped from 30 since we have a big tracker and was hitting the limit.
|
||||
operations-per-run: 60
|
||||
close-issue-reason: not_planned
|
||||
# Start with the oldest issues, always.
|
||||
ascending: true
|
||||
|
||||
@@ -2,12 +2,14 @@
|
||||
alog = "alog"
|
||||
Jetbrains = "JetBrains"
|
||||
IST = "IST"
|
||||
MacOS = "macOS"
|
||||
|
||||
[default.extend-words]
|
||||
|
||||
[files]
|
||||
extend-exclude = [
|
||||
"**.svg",
|
||||
"**.png",
|
||||
"**.lock",
|
||||
"go.sum",
|
||||
"go.mod",
|
||||
|
||||
@@ -34,6 +34,7 @@ dist/
|
||||
site/out/
|
||||
|
||||
*.tfstate
|
||||
*.tfstate.backup
|
||||
*.tfplan
|
||||
*.lock.hcl
|
||||
.terraform/
|
||||
|
||||
Vendored
+7
@@ -2,6 +2,7 @@
|
||||
"cSpell.words": [
|
||||
"apps",
|
||||
"awsidentity",
|
||||
"bodyclose",
|
||||
"buildinfo",
|
||||
"buildname",
|
||||
"circbuf",
|
||||
@@ -11,6 +12,7 @@
|
||||
"coderdtest",
|
||||
"codersdk",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"devel",
|
||||
"drpc",
|
||||
"drpcconn",
|
||||
@@ -36,6 +38,7 @@
|
||||
"Jobf",
|
||||
"Keygen",
|
||||
"kirsle",
|
||||
"Kubernetes",
|
||||
"ldflags",
|
||||
"manifoldco",
|
||||
"mapstructure",
|
||||
@@ -50,8 +53,10 @@
|
||||
"ntqry",
|
||||
"OIDC",
|
||||
"oneof",
|
||||
"paralleltest",
|
||||
"parameterscopeid",
|
||||
"pqtype",
|
||||
"prometheusmetrics",
|
||||
"promptui",
|
||||
"protobuf",
|
||||
"provisionerd",
|
||||
@@ -72,10 +77,12 @@
|
||||
"templateversions",
|
||||
"testdata",
|
||||
"testid",
|
||||
"testutil",
|
||||
"tfexec",
|
||||
"tfjson",
|
||||
"tfplan",
|
||||
"tfstate",
|
||||
"tparallel",
|
||||
"trimprefix",
|
||||
"turnconn",
|
||||
"typegen",
|
||||
|
||||
+13
-5
@@ -1,4 +1,8 @@
|
||||
FROM alpine
|
||||
# This is the multi-arch Dockerfile used for Coder. Since it's multi-arch and
|
||||
# cross-compiled, it cannot have ANY "RUN" commands. All binaries are built
|
||||
# using the go toolchain on the host and then copied into the build context by
|
||||
# scripts/build_docker.sh.
|
||||
FROM alpine:latest
|
||||
|
||||
# LABEL doesn't add any real layers so it's fine (and easier) to do it here than
|
||||
# in the build script.
|
||||
@@ -12,11 +16,15 @@ LABEL \
|
||||
org.opencontainers.image.licenses="AGPL-3.0"
|
||||
|
||||
# The coder binary is injected by scripts/build_docker.sh.
|
||||
ADD coder /opt/coder
|
||||
COPY --chown=coder:coder --chmod=755 coder /opt/coder
|
||||
|
||||
# Create coder group and user. We cannot use `addgroup` and `adduser` because
|
||||
# they won't work if we're building the image for a different architecture.
|
||||
COPY --chown=root:root --chmod=644 group passwd /etc/
|
||||
COPY --chown=coder:coder --chmod=700 empty-dir /home/coder
|
||||
|
||||
# Create coder group and user.
|
||||
RUN addgroup -g 1000 coder && \
|
||||
adduser -D -g "" -h /home/coder -G coder -u 1000 -S -s /bin/sh coder
|
||||
USER coder:coder
|
||||
ENV HOME=/home/coder
|
||||
WORKDIR /home/coder
|
||||
|
||||
ENTRYPOINT [ "/opt/coder", "server" ]
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
LICENSE (GNU Affero General Public License) applies to
|
||||
all files in this repository, except for those in or under
|
||||
any directory named "enterprise", which are Copyright Coder
|
||||
Technologies, Inc., All Rights Reserved.
|
||||
|
||||
We plan to release an enterprise license covering these files
|
||||
as soon as possible. Watch this space.
|
||||
|
||||
|
||||
@@ -56,18 +56,13 @@ build: site/out/index.html $(shell find . -not -path './vendor/*' -type f -name
|
||||
.PHONY: build
|
||||
|
||||
# Runs migrations to output a dump of the database.
|
||||
coderd/database/dump.sql: $(wildcard coderd/database/migrations/*.sql)
|
||||
go run coderd/database/dump/main.go
|
||||
coderd/database/dump.sql: coderd/database/gen/dump/main.go $(wildcard coderd/database/migrations/*.sql)
|
||||
go run coderd/database/gen/dump/main.go
|
||||
|
||||
# Generates Go code for querying the database.
|
||||
coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $(wildcard coderd/database/queries/*.sql)
|
||||
coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $(wildcard coderd/database/queries/*.sql) coderd/database/gen/enum/main.go
|
||||
coderd/database/generate.sh
|
||||
|
||||
# This target is deprecated, as GNU make has issues passing signals to subprocesses.
|
||||
dev:
|
||||
@echo Please run ./scripts/develop.sh manually.
|
||||
.PHONY: dev
|
||||
|
||||
fmt/prettier:
|
||||
@echo "--- prettier"
|
||||
cd site
|
||||
@@ -121,6 +116,7 @@ lint: lint/shellcheck lint/go
|
||||
.PHONY: lint
|
||||
|
||||
lint/go:
|
||||
./scripts/check_enterprise_imports.sh
|
||||
golangci-lint run
|
||||
.PHONY: lint/go
|
||||
|
||||
|
||||
@@ -3,8 +3,9 @@
|
||||
[](https://discord.gg/coder)
|
||||
[](https://codecov.io/gh/coder/coder)
|
||||
[](https://pkg.go.dev/github.com/coder/coder)
|
||||
[](https://twitter.com/coderhq)
|
||||
Follow](https://img.shields.io/twitter/follow/coderhq?label=%40coderhq&style=social)](https://twitter.com/coderhq)
|
||||
|
||||
Coder creates remote development machines so your team can develop from anywhere.
|
||||
|
||||
@@ -53,7 +54,7 @@ curl -L https://coder.com/install.sh | sh -s -- --help
|
||||
|
||||
> See [install](docs/install.md) for additional methods.
|
||||
|
||||
Once installed, you can start a production deployment with a single command:
|
||||
Once installed, you can start a production deployment<sup>1</sup> with a single command:
|
||||
|
||||
```sh
|
||||
# Automatically sets up an external access URL on *.try.coder.app
|
||||
@@ -63,6 +64,8 @@ coder server --tunnel
|
||||
coder server --postgres-url <url> --access-url <url>
|
||||
```
|
||||
|
||||
> <sup>1</sup> The embedded database is great for trying out Coder with small deployments, but do consider using an external database for increased assurance and control.
|
||||
|
||||
Use `coder --help` to get a complete list of flags and environment variables. Use our [quickstart guide](https://coder.com/docs/coder-oss/latest/quickstart) for a full walkthrough.
|
||||
|
||||
## Documentation
|
||||
@@ -94,4 +97,4 @@ Join our community on [Discord](https://discord.gg/coder) and [Twitter](https://
|
||||
|
||||
Read the [contributing docs](https://coder.com/docs/coder-oss/latest/CONTRIBUTING).
|
||||
|
||||
Find our list of contributors [here](./docs/CONTRIBUTORS.md).
|
||||
Find our list of contributors [here](https://github.com/coder/coder/graphs/contributors).
|
||||
|
||||
@@ -129,6 +129,7 @@ func (a *agent) run(ctx context.Context) {
|
||||
// An exponential back-off occurs when the connection is failing to dial.
|
||||
// This is to prevent server spam in case of a coderd outage.
|
||||
for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(ctx); {
|
||||
a.logger.Info(ctx, "connecting")
|
||||
metadata, peerListener, err = a.dialer(ctx, a.logger)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
@@ -255,6 +256,7 @@ func (a *agent) handlePeerConn(ctx context.Context, conn *peer.Conn) {
|
||||
}
|
||||
|
||||
func (a *agent) init(ctx context.Context) {
|
||||
a.logger.Info(ctx, "generating host key")
|
||||
// Clients' should ignore the host key when connecting.
|
||||
// The agent needs to authenticate with coderd to SSH,
|
||||
// so SSH authentication doesn't improve security.
|
||||
@@ -392,12 +394,25 @@ func (a *agent) createCommand(ctx context.Context, rawCommand string, env []stri
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting os executable: %w", err)
|
||||
}
|
||||
// Set environment variables reliable detection of being inside a
|
||||
// Coder workspace.
|
||||
cmd.Env = append(cmd.Env, "CODER=true")
|
||||
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("USER=%s", username))
|
||||
// Git on Windows resolves with UNIX-style paths.
|
||||
// If using backslashes, it's unable to find the executable.
|
||||
unixExecutablePath := strings.ReplaceAll(executablePath, "\\", "/")
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf(`GIT_SSH_COMMAND=%s gitssh --`, unixExecutablePath))
|
||||
|
||||
// Set SSH connection environment variables (these are also set by OpenSSH
|
||||
// and thus expected to be present by SSH clients). Since the agent does
|
||||
// networking in-memory, trying to provide accurate values here would be
|
||||
// nonsensical. For now, we hard code these values so that they're present.
|
||||
srcAddr, srcPort := "0.0.0.0", "0"
|
||||
dstAddr, dstPort := "0.0.0.0", "0"
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("SSH_CLIENT=%s %s %s", srcAddr, srcPort, dstPort))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("SSH_CONNECTION=%s %s %s %s", srcAddr, srcPort, dstAddr, dstPort))
|
||||
|
||||
// Load environment variables passed via the agent.
|
||||
// These should override all variables we manually specify.
|
||||
for envKey, value := range metadata.EnvironmentVariables {
|
||||
@@ -435,6 +450,8 @@ func (a *agent) handleSSHSession(session ssh.Session) (retErr error) {
|
||||
sshPty, windowSize, isPty := session.Pty()
|
||||
if isPty {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("TERM=%s", sshPty.Term))
|
||||
|
||||
// The pty package sets `SSH_TTY` on supported platforms.
|
||||
ptty, process, err := pty.Start(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start command: %w", err)
|
||||
@@ -795,7 +812,9 @@ func (r *reconnectingPTY) Close() {
|
||||
_ = conn.Close()
|
||||
}
|
||||
_ = r.ptty.Close()
|
||||
r.circularBufferMutex.Lock()
|
||||
r.circularBuffer.Reset()
|
||||
r.circularBufferMutex.Unlock()
|
||||
r.timeout.Stop()
|
||||
}
|
||||
|
||||
|
||||
@@ -232,6 +232,49 @@ func TestAgent(t *testing.T) {
|
||||
require.Equal(t, expect, strings.TrimSpace(string(output)))
|
||||
})
|
||||
|
||||
t.Run("Coder env vars", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, key := range []string{"CODER"} {
|
||||
key := key
|
||||
t.Run(key, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
session := setupSSHSession(t, agent.Metadata{})
|
||||
command := "sh -c 'echo $" + key + "'"
|
||||
if runtime.GOOS == "windows" {
|
||||
command = "cmd.exe /c echo %" + key + "%"
|
||||
}
|
||||
output, err := session.Output(command)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, strings.TrimSpace(string(output)))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("SSH connection env vars", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Note: the SSH_TTY environment variable should only be set for TTYs.
|
||||
// For some reason this test produces a TTY locally and a non-TTY in CI
|
||||
// so we don't test for the absence of SSH_TTY.
|
||||
for _, key := range []string{"SSH_CONNECTION", "SSH_CLIENT"} {
|
||||
key := key
|
||||
t.Run(key, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
session := setupSSHSession(t, agent.Metadata{})
|
||||
command := "sh -c 'echo $" + key + "'"
|
||||
if runtime.GOOS == "windows" {
|
||||
command = "cmd.exe /c echo %" + key + "%"
|
||||
}
|
||||
output, err := session.Output(command)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, strings.TrimSpace(string(output)))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("StartupScript", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tempPath := filepath.Join(t.TempDir(), "content.txt")
|
||||
|
||||
@@ -29,6 +29,7 @@ func TestReap(t *testing.T) {
|
||||
// exited processes and passing the PIDs through the shared
|
||||
// channel.
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
pids := make(reap.PidCh, 1)
|
||||
err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
|
||||
@@ -73,6 +73,7 @@ func workspaceAgent() *cobra.Command {
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Info(cmd.Context(), "starting agent", slog.F("url", coderURL), slog.F("auth", auth))
|
||||
client := codersdk.New(coderURL)
|
||||
|
||||
if pprofEnabled {
|
||||
@@ -138,6 +139,7 @@ func workspaceAgent() *cobra.Command {
|
||||
}
|
||||
|
||||
if exchangeToken != nil {
|
||||
logger.Info(cmd.Context(), "exchanging identity token")
|
||||
// Agent's can start before resources are returned from the provisioner
|
||||
// daemon. If there are many resources being provisioned, this time
|
||||
// could be significant. This is arbitrarily set at an hour to prevent
|
||||
|
||||
@@ -6,8 +6,7 @@
|
||||
//
|
||||
// Will produce the following usage docs:
|
||||
//
|
||||
// -a, --address string The address to serve the API and dashboard (uses $CODER_ADDRESS). (default "127.0.0.1:3000")
|
||||
//
|
||||
// -a, --address string The address to serve the API and dashboard (uses $CODER_ADDRESS). (default "127.0.0.1:3000")
|
||||
package cliflag
|
||||
|
||||
import (
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
)
|
||||
|
||||
// Testcliflag cannot run in parallel because it uses t.Setenv.
|
||||
//
|
||||
//nolint:paralleltest
|
||||
func TestCliflag(t *testing.T) {
|
||||
t.Run("StringDefault", func(t *testing.T) {
|
||||
|
||||
@@ -21,7 +21,13 @@ import (
|
||||
// New creates a CLI instance with a configuration pointed to a
|
||||
// temporary testing directory.
|
||||
func New(t *testing.T, args ...string) (*cobra.Command, config.Root) {
|
||||
cmd := cli.Root()
|
||||
return NewWithSubcommands(t, cli.AGPL(), args...)
|
||||
}
|
||||
|
||||
func NewWithSubcommands(
|
||||
t *testing.T, subcommands []*cobra.Command, args ...string,
|
||||
) (*cobra.Command, config.Root) {
|
||||
cmd := cli.Root(subcommands)
|
||||
dir := t.TempDir()
|
||||
root := config.Root(dir)
|
||||
cmd.SetArgs(append([]string{"--global-config", dir}, args...))
|
||||
|
||||
+1
-1
@@ -79,7 +79,7 @@ func Agent(ctx context.Context, writer io.Writer, opts AgentOptions) error {
|
||||
defer resourceMutex.Unlock()
|
||||
message := "Don't panic, your workspace is booting up!"
|
||||
if agent.Status == codersdk.WorkspaceAgentDisconnected {
|
||||
message = "The workspace agent lost connection! Wait for it to reconnect or run: " + Styles.Code.Render("coder rebuild "+opts.WorkspaceName)
|
||||
message = "The workspace agent lost connection! Wait for it to reconnect or restart your workspace."
|
||||
}
|
||||
// This saves the cursor position, then defers clearing from the cursor
|
||||
// position to the end of the screen.
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
package cliui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/structtag"
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Table creates a new table with standardized styles.
|
||||
@@ -41,3 +46,262 @@ func FilterTableColumns(header table.Row, columns []string) []table.ColumnConfig
|
||||
}
|
||||
return columnConfigs
|
||||
}
|
||||
|
||||
// DisplayTable renders a table as a string. The input argument must be a slice
|
||||
// of structs. At least one field in the struct must have a `table:""` tag
|
||||
// containing the name of the column in the outputted table.
|
||||
//
|
||||
// Nested structs are processed if the field has the `table:"$NAME,recursive"`
|
||||
// tag and their fields will be named as `$PARENT_NAME $NAME`. If the tag is
|
||||
// malformed or a field is marked as recursive but does not contain a struct or
|
||||
// a pointer to a struct, this function will return an error (even with an empty
|
||||
// input slice).
|
||||
//
|
||||
// If sort is empty, the input order will be used. If filterColumns is empty or
|
||||
// nil, all available columns are included.
|
||||
func DisplayTable(out any, sort string, filterColumns []string) (string, error) {
|
||||
v := reflect.Indirect(reflect.ValueOf(out))
|
||||
|
||||
if v.Kind() != reflect.Slice {
|
||||
return "", xerrors.Errorf("DisplayTable called with a non-slice type")
|
||||
}
|
||||
|
||||
// Get the list of table column headers.
|
||||
headersRaw, err := typeToTableHeaders(v.Type().Elem())
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("get table headers recursively for type %q: %w", v.Type().Elem().String(), err)
|
||||
}
|
||||
if len(headersRaw) == 0 {
|
||||
return "", xerrors.New(`no table headers found on the input type, make sure there is at least one "table" struct tag`)
|
||||
}
|
||||
headers := make(table.Row, len(headersRaw))
|
||||
for i, header := range headersRaw {
|
||||
headers[i] = header
|
||||
}
|
||||
|
||||
// Verify that the given sort column and filter columns are valid.
|
||||
if sort != "" || len(filterColumns) != 0 {
|
||||
headersMap := make(map[string]string, len(headersRaw))
|
||||
for _, header := range headersRaw {
|
||||
headersMap[strings.ToLower(header)] = header
|
||||
}
|
||||
|
||||
if sort != "" {
|
||||
sort = strings.ToLower(strings.ReplaceAll(sort, "_", " "))
|
||||
h, ok := headersMap[sort]
|
||||
if !ok {
|
||||
return "", xerrors.Errorf(`specified sort column %q not found in table headers, available columns are "%v"`, sort, strings.Join(headersRaw, `", "`))
|
||||
}
|
||||
|
||||
// Autocorrect
|
||||
sort = h
|
||||
}
|
||||
|
||||
for i, column := range filterColumns {
|
||||
column := strings.ToLower(strings.ReplaceAll(column, "_", " "))
|
||||
h, ok := headersMap[column]
|
||||
if !ok {
|
||||
return "", xerrors.Errorf(`specified filter column %q not found in table headers, available columns are "%v"`, sort, strings.Join(headersRaw, `", "`))
|
||||
}
|
||||
|
||||
// Autocorrect
|
||||
filterColumns[i] = h
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the given sort column is valid.
|
||||
if sort != "" {
|
||||
sort = strings.ReplaceAll(sort, "_", " ")
|
||||
found := false
|
||||
for _, header := range headersRaw {
|
||||
if strings.EqualFold(sort, header) {
|
||||
found = true
|
||||
sort = header
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return "", xerrors.Errorf("specified sort column %q not found in table headers, available columns are %q", sort, strings.Join(headersRaw, `", "`))
|
||||
}
|
||||
}
|
||||
|
||||
// Setup the table formatter.
|
||||
tw := Table()
|
||||
tw.AppendHeader(headers)
|
||||
tw.SetColumnConfigs(FilterTableColumns(headers, filterColumns))
|
||||
if sort != "" {
|
||||
tw.SortBy([]table.SortBy{{
|
||||
Name: sort,
|
||||
}})
|
||||
}
|
||||
|
||||
// Write each struct to the table.
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
// Format the row as a slice.
|
||||
rowMap, err := valueToTableMap(v.Index(i))
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("get table row map %v: %w", i, err)
|
||||
}
|
||||
|
||||
rowSlice := make([]any, len(headers))
|
||||
for i, h := range headersRaw {
|
||||
v, ok := rowMap[h]
|
||||
if !ok {
|
||||
v = nil
|
||||
}
|
||||
|
||||
// Special type formatting.
|
||||
switch val := v.(type) {
|
||||
case time.Time:
|
||||
v = val.Format(time.Stamp)
|
||||
case *time.Time:
|
||||
if val != nil {
|
||||
v = val.Format(time.Stamp)
|
||||
}
|
||||
case fmt.Stringer:
|
||||
if val != nil {
|
||||
v = val.String()
|
||||
}
|
||||
}
|
||||
|
||||
rowSlice[i] = v
|
||||
}
|
||||
|
||||
tw.AppendRow(table.Row(rowSlice))
|
||||
}
|
||||
|
||||
return tw.Render(), nil
|
||||
}
|
||||
|
||||
// parseTableStructTag returns the name of the field according to the `table`
|
||||
// struct tag. If the table tag does not exist or is "-", an empty string is
|
||||
// returned. If the table tag is malformed, an error is returned.
|
||||
//
|
||||
// The returned name is transformed from "snake_case" to "normal text".
|
||||
func parseTableStructTag(field reflect.StructField) (name string, recurse bool, err error) {
|
||||
tags, err := structtag.Parse(string(field.Tag))
|
||||
if err != nil {
|
||||
return "", false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err)
|
||||
}
|
||||
|
||||
tag, err := tags.Get("table")
|
||||
if err != nil || tag.Name == "-" {
|
||||
// tags.Get only returns an error if the tag is not found.
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
recursive := false
|
||||
for _, opt := range tag.Options {
|
||||
if opt == "recursive" {
|
||||
recursive = true
|
||||
continue
|
||||
}
|
||||
|
||||
return "", false, xerrors.Errorf("unknown option %q in struct field tag", opt)
|
||||
}
|
||||
|
||||
return strings.ReplaceAll(tag.Name, "_", " "), recursive, nil
|
||||
}
|
||||
|
||||
func isStructOrStructPointer(t reflect.Type) bool {
|
||||
return t.Kind() == reflect.Struct || (t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Struct)
|
||||
}
|
||||
|
||||
// typeToTableHeaders converts a type to a slice of column names. If the given
|
||||
// type is invalid (not a struct or a pointer to a struct, has invalid table
|
||||
// tags, etc.), an error is returned.
|
||||
func typeToTableHeaders(t reflect.Type) ([]string, error) {
|
||||
if !isStructOrStructPointer(t) {
|
||||
return nil, xerrors.Errorf("typeToTableHeaders called with a non-struct or a non-pointer-to-a-struct type")
|
||||
}
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
headers := []string{}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
name, recursive, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse struct tags for field %q in type %q: %w", field.Name, t.String(), err)
|
||||
}
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldType := field.Type
|
||||
if recursive {
|
||||
if !isStructOrStructPointer(fieldType) {
|
||||
return nil, xerrors.Errorf("field %q in type %q is marked as recursive but does not contain a struct or a pointer to a struct", field.Name, t.String())
|
||||
}
|
||||
|
||||
childNames, err := typeToTableHeaders(fieldType)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get child field header names for field %q in type %q: %w", field.Name, fieldType.String(), err)
|
||||
}
|
||||
for _, childName := range childNames {
|
||||
headers = append(headers, fmt.Sprintf("%s %s", name, childName))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
headers = append(headers, name)
|
||||
}
|
||||
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
// valueToTableMap converts a struct to a map of column name to value. If the
|
||||
// given type is invalid (not a struct or a pointer to a struct, has invalid
|
||||
// table tags, etc.), an error is returned.
|
||||
func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
if !isStructOrStructPointer(val.Type()) {
|
||||
return nil, xerrors.Errorf("valueToTableMap called with a non-struct or a non-pointer-to-a-struct type")
|
||||
}
|
||||
if val.Kind() == reflect.Pointer {
|
||||
if val.IsNil() {
|
||||
// No data for this struct, so return an empty map. All values will
|
||||
// be rendered as nil in the resulting table.
|
||||
return map[string]any{}, nil
|
||||
}
|
||||
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
row := map[string]any{}
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
field := val.Type().Field(i)
|
||||
fieldVal := val.Field(i)
|
||||
name, recursive, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse struct tags for field %q in type %T: %w", field.Name, val, err)
|
||||
}
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Recurse if it's a struct.
|
||||
fieldType := field.Type
|
||||
if recursive {
|
||||
if !isStructOrStructPointer(fieldType) {
|
||||
return nil, xerrors.Errorf("field %q in type %q is marked as recursive but does not contain a struct or a pointer to a struct", field.Name, fieldType.String())
|
||||
}
|
||||
|
||||
// valueToTableMap does nothing on pointers so we don't need to
|
||||
// filter here.
|
||||
childMap, err := valueToTableMap(fieldVal)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get child field values for field %q in type %q: %w", field.Name, fieldType.String(), err)
|
||||
}
|
||||
for childName, childValue := range childMap {
|
||||
row[fmt.Sprintf("%s %s", name, childName)] = childValue
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise, we just use the field value.
|
||||
row[name] = val.Field(i).Interface()
|
||||
}
|
||||
|
||||
return row, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,352 @@
|
||||
package cliui_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
)
|
||||
|
||||
// stringWrapper is a minimal fmt.Stringer used to verify that tables apply
// special formatting to fmt.Stringer values.
type stringWrapper struct {
	str string
}

// Compile-time assertion that stringWrapper implements fmt.Stringer.
var _ fmt.Stringer = stringWrapper{}

// String returns the wrapped string verbatim.
func (w stringWrapper) String() string {
	return w.str
}
|
||||
|
||||
// tableTest1 is the primary fixture for table-rendering tests. It exercises
// plain tagged fields, an untagged (skipped) field, recursive struct and
// struct-pointer fields, a non-recursive struct field, and time values that
// receive special formatting.
type tableTest1 struct {
	Name        string      `table:"name"`
	NotIncluded string      // no table tag
	Age         int         `table:"age"`
	Roles       []string    `table:"roles"`
	Sub1        tableTest2  `table:"sub_1,recursive"`
	Sub2        *tableTest2 `table:"sub_2,recursive"`
	Sub3        tableTest3  `table:"sub 3,recursive"`
	Sub4        tableTest2  `table:"sub 4"` // not recursive

	// Types with special formatting.
	Time    time.Time  `table:"time"`
	TimePtr *time.Time `table:"time_ptr"`
}

// tableTest2 is the nested fixture referenced by tableTest1 and tableTest3.
// It includes a fmt.Stringer field and a field explicitly excluded via "-".
type tableTest2 struct {
	Name        stringWrapper `table:"name"`
	Age         int           `table:"age"`
	NotIncluded string        `table:"-"`
}

// tableTest3 is a fixture with only a recursive child, used to test nested
// recursion (its own untagged field is skipped).
type tableTest3 struct {
	NotIncluded string // no table tag
	Sub         tableTest2 `table:"inner,recursive"`
}
|
||||
|
||||
// Test_DisplayTable exercises cliui.DisplayTable end-to-end: untagged-field
// skipping, recursion into struct and pointer fields, nil-pointer handling,
// special time.Time formatting, sorting, column filtering, and the error
// cases for invalid inputs and malformed `table` tags.
func Test_DisplayTable(t *testing.T) {
	t.Parallel()

	someTime := time.Date(2022, 8, 2, 15, 49, 10, 0, time.Local)
	// Shared fixture rows used by the OK/Sort/Filter subtests below.
	in := []tableTest1{
		{
			Name:  "foo",
			Age:   10,
			Roles: []string{"a", "b", "c"},
			Sub1: tableTest2{
				Name: stringWrapper{str: "foo1"},
				Age:  11,
			},
			Sub2: &tableTest2{
				Name: stringWrapper{str: "foo2"},
				Age:  12,
			},
			Sub3: tableTest3{
				Sub: tableTest2{
					Name: stringWrapper{str: "foo3"},
					Age:  13,
				},
			},
			Sub4: tableTest2{
				Name: stringWrapper{str: "foo4"},
				Age:  14,
			},
			Time:    someTime,
			TimePtr: &someTime,
		},
		{
			Name:  "bar",
			Age:   20,
			Roles: []string{"a"},
			Sub1: tableTest2{
				Name: stringWrapper{str: "bar1"},
				Age:  21,
			},
			Sub2: nil,
			Sub3: tableTest3{
				Sub: tableTest2{
					Name: stringWrapper{str: "bar3"},
					Age:  23,
				},
			},
			Sub4: tableTest2{
				Name: stringWrapper{str: "bar4"},
				Age:  24,
			},
			Time:    someTime,
			TimePtr: nil,
		},
		{
			Name:  "baz",
			Age:   30,
			Roles: nil,
			Sub1: tableTest2{
				Name: stringWrapper{str: "baz1"},
				Age:  31,
			},
			Sub2: nil,
			Sub3: tableTest3{
				Sub: tableTest2{
					Name: stringWrapper{str: "baz3"},
					Age:  33,
				},
			},
			Sub4: tableTest2{
				Name: stringWrapper{str: "baz4"},
				Age:  34,
			},
			Time:    someTime,
			TimePtr: nil,
		},
	}

	// This test tests skipping fields without table tags, recursion, pointer
	// dereferencing, and nil pointer skipping.
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		expected := `
NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR
foo 10 [a b c] foo1 11 foo2 12 foo3 13 {foo4 14 } Aug 2 15:49:10 Aug 2 15:49:10
bar 20 [a] bar1 21 <nil> <nil> bar3 23 {bar4 24 } Aug 2 15:49:10 <nil>
baz 30 [] baz1 31 <nil> <nil> baz3 33 {baz4 34 } Aug 2 15:49:10 <nil>
`

		// Test with non-pointer values.
		out, err := cliui.DisplayTable(in, "", nil)
		log.Println("rendered table:\n" + out)
		require.NoError(t, err)
		compareTables(t, expected, out)

		// Test with pointer values.
		inPtr := make([]*tableTest1, len(in))
		for i, v := range in {
			// Copy before taking the address so each pointer is distinct.
			v := v
			inPtr[i] = &v
		}
		out, err = cliui.DisplayTable(inPtr, "", nil)
		require.NoError(t, err)
		compareTables(t, expected, out)
	})

	t.Run("Sort", func(t *testing.T) {
		t.Parallel()

		// Same rows as OK, but ordered by the "name" column.
		expected := `
NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR
bar 20 [a] bar1 21 <nil> <nil> bar3 23 {bar4 24 } Aug 2 15:49:10 <nil>
baz 30 [] baz1 31 <nil> <nil> baz3 33 {baz4 34 } Aug 2 15:49:10 <nil>
foo 10 [a b c] foo1 11 foo2 12 foo3 13 {foo4 14 } Aug 2 15:49:10 Aug 2 15:49:10
`

		out, err := cliui.DisplayTable(in, "name", nil)
		log.Println("rendered table:\n" + out)
		require.NoError(t, err)
		compareTables(t, expected, out)
	})

	t.Run("Filter", func(t *testing.T) {
		t.Parallel()

		// Only the requested columns appear, in header order.
		expected := `
NAME SUB 1 NAME SUB 3 INNER NAME TIME
foo foo1 foo3 Aug 2 15:49:10
bar bar1 bar3 Aug 2 15:49:10
baz baz1 baz3 Aug 2 15:49:10
`

		out, err := cliui.DisplayTable(in, "", []string{"name", "sub_1_name", "sub_3 inner name", "time"})
		log.Println("rendered table:\n" + out)
		require.NoError(t, err)
		compareTables(t, expected, out)
	})

	// This test ensures that safeties against invalid use of `table` tags
	// causes errors (even without data).
	t.Run("Errors", func(t *testing.T) {
		t.Parallel()

		t.Run("NotSlice", func(t *testing.T) {
			t.Parallel()

			// Shadows the outer fixture slice on purpose.
			var in string
			_, err := cliui.DisplayTable(in, "", nil)
			require.Error(t, err)
		})

		t.Run("BadSortColumn", func(t *testing.T) {
			t.Parallel()

			_, err := cliui.DisplayTable(in, "bad_column_does_not_exist", nil)
			require.Error(t, err)
		})

		t.Run("BadFilterColumns", func(t *testing.T) {
			t.Parallel()

			_, err := cliui.DisplayTable(in, "", []string{"name", "bad_column_does_not_exist"})
			require.Error(t, err)
		})

		t.Run("Interfaces", func(t *testing.T) {
			t.Parallel()

			t.Run("WithoutData", func(t *testing.T) {
				t.Parallel()

				var in []any
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})

			t.Run("WithData", func(t *testing.T) {
				t.Parallel()

				in := []any{tableTest1{}}
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})
		})

		t.Run("NotStruct", func(t *testing.T) {
			t.Parallel()

			t.Run("WithoutData", func(t *testing.T) {
				t.Parallel()

				var in []string
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})

			t.Run("WithData", func(t *testing.T) {
				t.Parallel()

				in := []string{"foo", "bar", "baz"}
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})
		})

		t.Run("NoTableTags", func(t *testing.T) {
			t.Parallel()

			type noTableTagsTest struct {
				Field string `json:"field"`
			}

			t.Run("WithoutData", func(t *testing.T) {
				t.Parallel()

				var in []noTableTagsTest
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})

			t.Run("WithData", func(t *testing.T) {
				t.Parallel()

				in := []noTableTagsTest{{Field: "hi"}}
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})
		})

		t.Run("InvalidTag/NoName", func(t *testing.T) {
			t.Parallel()

			type noNameTest struct {
				Field string `table:""`
			}

			t.Run("WithoutData", func(t *testing.T) {
				t.Parallel()

				var in []noNameTest
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})

			t.Run("WithData", func(t *testing.T) {
				t.Parallel()

				in := []noNameTest{{Field: "test"}}
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})
		})

		t.Run("InvalidTag/BadSyntax", func(t *testing.T) {
			t.Parallel()

			type invalidSyntaxTest struct {
				Field string `table:"asda,asdjada"`
			}

			t.Run("WithoutData", func(t *testing.T) {
				t.Parallel()

				var in []invalidSyntaxTest
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})

			t.Run("WithData", func(t *testing.T) {
				t.Parallel()

				in := []invalidSyntaxTest{{Field: "test"}}
				_, err := cliui.DisplayTable(in, "", nil)
				require.Error(t, err)
			})
		})
	})
}
|
||||
|
||||
// compareTables normalizes the incoming table lines
|
||||
func compareTables(t *testing.T, expected, out string) {
|
||||
t.Helper()
|
||||
|
||||
expectedLines := strings.Split(strings.TrimSpace(expected), "\n")
|
||||
gotLines := strings.Split(strings.TrimSpace(out), "\n")
|
||||
assert.Equal(t, len(expectedLines), len(gotLines), "expected line count does not match generated line count")
|
||||
|
||||
// Map the expected and got lines to normalize them.
|
||||
expectedNormalized := make([]string, len(expectedLines))
|
||||
gotNormalized := make([]string, len(gotLines))
|
||||
normalizeLine := func(s string) string {
|
||||
return strings.Join(strings.Fields(strings.TrimSpace(s)), " ")
|
||||
}
|
||||
for i, s := range expectedLines {
|
||||
expectedNormalized[i] = normalizeLine(s)
|
||||
}
|
||||
for i, s := range gotLines {
|
||||
gotNormalized[i] = normalizeLine(s)
|
||||
}
|
||||
|
||||
require.Equal(t, expectedNormalized, gotNormalized, "expected lines to match generated lines")
|
||||
}
|
||||
+36
-66
@@ -137,7 +137,6 @@ func configSSH() *cobra.Command {
|
||||
sshConfigFile string
|
||||
sshConfigOpts sshConfigOptions
|
||||
usePreviousOpts bool
|
||||
coderConfigFile string
|
||||
dryRun bool
|
||||
skipProxyCommand bool
|
||||
wireguard bool
|
||||
@@ -156,8 +155,9 @@ func configSSH() *cobra.Command {
|
||||
Command: "coder config-ssh --dry-run",
|
||||
},
|
||||
),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -197,15 +197,7 @@ func configSSH() *cobra.Command {
|
||||
// Parse the previous configuration only if config-ssh
|
||||
// has been run previously.
|
||||
var lastConfig *sshConfigOptions
|
||||
var ok bool
|
||||
var coderConfigRaw []byte
|
||||
if coderConfigFile, coderConfigRaw, ok = readDeprecatedCoderConfigFile(homedir, coderConfigFile); ok {
|
||||
// Deprecated: Remove after migration period.
|
||||
changes = append(changes, fmt.Sprintf("Remove old auto-generated coder config file at %s", coderConfigFile))
|
||||
// Backwards compate, restore old options.
|
||||
c := sshConfigParseLastOptions(bytes.NewReader(coderConfigRaw))
|
||||
lastConfig = &c
|
||||
} else if section, ok := sshConfigGetCoderSection(configRaw); ok {
|
||||
if section, ok := sshConfigGetCoderSection(configRaw); ok {
|
||||
c := sshConfigParseLastOptions(bytes.NewReader(section))
|
||||
lastConfig = &c
|
||||
}
|
||||
@@ -236,6 +228,8 @@ func configSSH() *cobra.Command {
|
||||
}
|
||||
// Selecting "no" will use the last config.
|
||||
sshConfigOpts = *lastConfig
|
||||
} else {
|
||||
changes = append(changes, "Use new SSH options")
|
||||
}
|
||||
// Only print when prompts are shown.
|
||||
if yes, _ := cmd.Flags().GetBool("yes"); !yes {
|
||||
@@ -244,14 +238,6 @@ func configSSH() *cobra.Command {
|
||||
}
|
||||
|
||||
configModified := configRaw
|
||||
|
||||
// Check for the presence of the coder Include
|
||||
// statement is present and add if missing.
|
||||
// Deprecated: Remove after migration period.
|
||||
if configModified, ok = removeDeprecatedSSHIncludeStatement(configModified); ok {
|
||||
changes = append(changes, fmt.Sprintf("Remove %q from %s", "Include coder", sshConfigFile))
|
||||
}
|
||||
|
||||
root := createConfig(cmd)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
@@ -312,17 +298,34 @@ func configSSH() *cobra.Command {
|
||||
_, _ = buf.Write(after)
|
||||
|
||||
if !bytes.Equal(configModified, buf.Bytes()) {
|
||||
changes = append(changes, fmt.Sprintf("Update coder config section in %s", sshConfigFile))
|
||||
changes = append(changes, fmt.Sprintf("Update the coder section in %s", sshConfigFile))
|
||||
configModified = buf.Bytes()
|
||||
}
|
||||
|
||||
if len(changes) > 0 {
|
||||
dryRunDisclaimer := ""
|
||||
if dryRun {
|
||||
dryRunDisclaimer = " (dry-run, no changes will be made)"
|
||||
if len(changes) == 0 {
|
||||
_, _ = fmt.Fprintf(out, "No changes to make.\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
_, _ = fmt.Fprintf(out, "Dry run, the following changes would be made to your SSH configuration:\n\n * %s\n\n", strings.Join(changes, "\n * "))
|
||||
|
||||
color := isTTYOut(cmd)
|
||||
diff, err := diffBytes(sshConfigFile, configRaw, configModified, color)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("diff failed: %w", err)
|
||||
}
|
||||
if len(diff) > 0 {
|
||||
// Write diff to stdout.
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s", diff)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(changes) > 0 {
|
||||
_, err = cliui.Prompt(cmd, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("The following changes will be made to your SSH configuration:\n\n * %s\n\n Continue?%s", strings.Join(changes, "\n * "), dryRunDisclaimer),
|
||||
Text: fmt.Sprintf("The following changes will be made to your SSH configuration:\n\n * %s\n\n Continue?", strings.Join(changes, "\n * ")),
|
||||
IsConfirm: true,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -334,47 +337,18 @@ func configSSH() *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
color := isTTYOut(cmd)
|
||||
diffFns := []func() ([]byte, error){
|
||||
func() ([]byte, error) { return diffBytes(sshConfigFile, configRaw, configModified, color) },
|
||||
}
|
||||
if len(coderConfigRaw) > 0 {
|
||||
// Deprecated: Remove after migration period.
|
||||
diffFns = append(diffFns, func() ([]byte, error) { return diffBytes(coderConfigFile, coderConfigRaw, nil, color) })
|
||||
}
|
||||
|
||||
for _, diffFn := range diffFns {
|
||||
diff, err := diffFn()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("diff failed: %w", err)
|
||||
}
|
||||
if len(diff) > 0 {
|
||||
// Write diff to stdout.
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), "\n%s", diff)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(configRaw, configModified) {
|
||||
err = writeWithTempFileAndMove(sshConfigFile, bytes.NewReader(configModified))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write ssh config failed: %w", err)
|
||||
}
|
||||
}
|
||||
// Deprecated: Remove after migration period.
|
||||
if len(coderConfigRaw) > 0 {
|
||||
err = os.Remove(coderConfigFile)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("remove coder config failed: %w", err)
|
||||
}
|
||||
if !bytes.Equal(configRaw, configModified) {
|
||||
err = writeWithTempFileAndMove(sshConfigFile, bytes.NewReader(configModified))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write ssh config failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(workspaceConfigs) > 0 {
|
||||
_, _ = fmt.Fprintln(out, "You should now be able to ssh into your workspace.")
|
||||
_, _ = fmt.Fprintf(out, "For example, try running:\n\n\t$ ssh coder.%s\n\n", workspaceConfigs[0].Name)
|
||||
_, _ = fmt.Fprintf(out, "For example, try running:\n\n\t$ ssh coder.%s\n", workspaceConfigs[0].Name)
|
||||
} else {
|
||||
_, _ = fmt.Fprint(out, "You don't have any workspaces yet, try creating one with:\n\n\t$ coder create <workspace>\n\n")
|
||||
_, _ = fmt.Fprint(out, "You don't have any workspaces yet, try creating one with:\n\n\t$ coder create <workspace>\n")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -388,10 +362,6 @@ func configSSH() *cobra.Command {
|
||||
cliflag.BoolVarP(cmd.Flags(), &wireguard, "wireguard", "", "CODER_CONFIG_SSH_WIREGUARD", false, "Whether to use Wireguard for SSH tunneling.")
|
||||
_ = cmd.Flags().MarkHidden("wireguard")
|
||||
|
||||
// Deprecated: Remove after migration period.
|
||||
cmd.Flags().StringVar(&coderConfigFile, "test.ssh-coder-config-file", sshDefaultCoderConfigFileName, "Specifies the path to an Coder SSH config file. Useful for testing.")
|
||||
_ = cmd.Flags().MarkHidden("test.ssh-coder-config-file")
|
||||
|
||||
cliui.AllowSkipPrompt(cmd)
|
||||
|
||||
return cmd
|
||||
@@ -557,7 +527,7 @@ func currentBinPath(w io.Writer) (string, error) {
|
||||
|
||||
// diffBytes takes two byte slices and diffs them as if they were in a
|
||||
// file named name.
|
||||
//nolint: revive // Color is an option, not a control coupling.
|
||||
// nolint: revive // Color is an option, not a control coupling.
|
||||
func diffBytes(name string, b1, b2 []byte, color bool) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
var opts []write.Option
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// This file contains config-ssh definitions that are deprecated, they
|
||||
// will be removed after a migratory period.
|
||||
|
||||
const (
|
||||
sshDefaultCoderConfigFileName = "~/.ssh/coder"
|
||||
sshCoderConfigHeader = "# This file is managed by coder. DO NOT EDIT."
|
||||
)
|
||||
|
||||
// Regular expressions are used because SSH configs do not have
|
||||
// meaningful indentation and keywords are case-insensitive.
|
||||
var (
|
||||
// Find the semantically correct include statement. Since the user can
|
||||
// modify their configuration as they see fit, there could be:
|
||||
// - Leading indentation (space, tab)
|
||||
// - Trailing indentation (space, tab)
|
||||
// - Select newline after Include statement for cleaner removal
|
||||
// In the following cases, we will not recognize the Include statement
|
||||
// and leave as-is (i.e. they're not supported):
|
||||
// - User adds another file to the Include statement
|
||||
// - User adds a comment on the same line as the Include statement
|
||||
sshCoderIncludedRe = regexp.MustCompile(`(?m)^[\t ]*((?i)Include) coder[\t ]*[\r]?[\n]?$`)
|
||||
)
|
||||
|
||||
// removeDeprecatedSSHIncludeStatement checks for the Include coder statement
|
||||
// and returns modified = true if it was removed.
|
||||
func removeDeprecatedSSHIncludeStatement(data []byte) (modifiedData []byte, modified bool) {
|
||||
coderInclude := sshCoderIncludedRe.FindIndex(data)
|
||||
if coderInclude == nil {
|
||||
return data, false
|
||||
}
|
||||
|
||||
// Remove Include statement.
|
||||
d := append([]byte{}, data[:coderInclude[0]]...)
|
||||
d = append(d, data[coderInclude[1]:]...)
|
||||
data = d
|
||||
|
||||
return data, true
|
||||
}
|
||||
|
||||
// readDeprecatedCoderConfigFile reads the deprecated split config file.
|
||||
func readDeprecatedCoderConfigFile(homedir, coderConfigFile string) (name string, data []byte, ok bool) {
|
||||
if strings.HasPrefix(coderConfigFile, "~/") {
|
||||
coderConfigFile = filepath.Join(homedir, coderConfigFile[2:])
|
||||
}
|
||||
|
||||
b, err := os.ReadFile(coderConfigFile)
|
||||
if err != nil {
|
||||
return coderConfigFile, nil, false
|
||||
}
|
||||
if len(b) > 0 {
|
||||
if !bytes.HasPrefix(b, []byte(sshCoderConfigHeader)) {
|
||||
return coderConfigFile, nil, false
|
||||
}
|
||||
}
|
||||
return coderConfigFile, b, true
|
||||
}
|
||||
+8
-134
@@ -6,7 +6,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -30,15 +29,14 @@ import (
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func sshConfigFileNames(t *testing.T) (sshConfig string, coderConfig string) {
|
||||
func sshConfigFileName(t *testing.T) (sshConfig string) {
|
||||
t.Helper()
|
||||
tmpdir := t.TempDir()
|
||||
dotssh := filepath.Join(tmpdir, ".ssh")
|
||||
err := os.Mkdir(dotssh, 0o700)
|
||||
require.NoError(t, err)
|
||||
n1 := filepath.Join(dotssh, "config")
|
||||
n2 := filepath.Join(dotssh, "coder")
|
||||
return n1, n2
|
||||
n := filepath.Join(dotssh, "config")
|
||||
return n
|
||||
}
|
||||
|
||||
func sshConfigFileCreate(t *testing.T, name string, data io.Reader) {
|
||||
@@ -135,7 +133,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
sshConfigFile, _ := sshConfigFileNames(t)
|
||||
sshConfigFile := sshConfigFileName(t)
|
||||
|
||||
tcpAddr, valid := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
@@ -197,12 +195,10 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
}, "\n")
|
||||
|
||||
type writeConfig struct {
|
||||
ssh string
|
||||
coder string
|
||||
ssh string
|
||||
}
|
||||
type wantConfig struct {
|
||||
ssh string
|
||||
coderKept bool
|
||||
ssh string
|
||||
}
|
||||
type match struct {
|
||||
match, write string
|
||||
@@ -514,120 +510,6 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
"--yes",
|
||||
},
|
||||
},
|
||||
|
||||
// Tests for deprecated split coder config.
|
||||
{
|
||||
name: "Do not overwrite unknown coder config",
|
||||
writeConfig: writeConfig{
|
||||
ssh: strings.Join([]string{
|
||||
baseHeader,
|
||||
"",
|
||||
}, "\n"),
|
||||
coder: strings.Join([]string{
|
||||
"We're no strangers to love",
|
||||
"You know the rules and so do I (do I)",
|
||||
}, "\n"),
|
||||
},
|
||||
wantConfig: wantConfig{
|
||||
coderKept: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Transfer options from coder to ssh config",
|
||||
writeConfig: writeConfig{
|
||||
ssh: strings.Join([]string{
|
||||
"Include coder",
|
||||
"",
|
||||
}, "\n"),
|
||||
coder: strings.Join([]string{
|
||||
"# This file is managed by coder. DO NOT EDIT.",
|
||||
"#",
|
||||
"# You should not hand-edit this file, all changes will be lost when running",
|
||||
"# \"coder config-ssh\".",
|
||||
"#",
|
||||
"# Last config-ssh options:",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
}, "\n"),
|
||||
},
|
||||
wantConfig: wantConfig{
|
||||
ssh: strings.Join([]string{
|
||||
headerStart,
|
||||
"# Last config-ssh options:",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
headerEnd,
|
||||
"",
|
||||
}, "\n"),
|
||||
},
|
||||
matches: []match{
|
||||
{match: "Use new options?", write: "no"},
|
||||
{match: "Continue?", write: "yes"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Allow overwriting previous options from coder config",
|
||||
writeConfig: writeConfig{
|
||||
ssh: strings.Join([]string{
|
||||
"Include coder",
|
||||
"",
|
||||
}, "\n"),
|
||||
coder: strings.Join([]string{
|
||||
"# This file is managed by coder. DO NOT EDIT.",
|
||||
"#",
|
||||
"# You should not hand-edit this file, all changes will be lost when running",
|
||||
"# \"coder config-ssh\".",
|
||||
"#",
|
||||
"# Last config-ssh options:",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
}, "\n"),
|
||||
},
|
||||
wantConfig: wantConfig{
|
||||
ssh: strings.Join([]string{
|
||||
baseHeader,
|
||||
"",
|
||||
}, "\n"),
|
||||
},
|
||||
matches: []match{
|
||||
{match: "Use new options?", write: "yes"},
|
||||
{match: "Continue?", write: "yes"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Allow overwriting previous options from coder config when they differ",
|
||||
writeConfig: writeConfig{
|
||||
ssh: strings.Join([]string{
|
||||
"Include coder",
|
||||
"",
|
||||
}, "\n"),
|
||||
coder: strings.Join([]string{
|
||||
"# This file is managed by coder. DO NOT EDIT.",
|
||||
"#",
|
||||
"# You should not hand-edit this file, all changes will be lost when running",
|
||||
"# \"coder config-ssh\".",
|
||||
"#",
|
||||
"# Last config-ssh options:",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
}, "\n"),
|
||||
},
|
||||
wantConfig: wantConfig{
|
||||
ssh: strings.Join([]string{
|
||||
headerStart,
|
||||
"# Last config-ssh options:",
|
||||
"# :ssh-option=ForwardAgent=no",
|
||||
"#",
|
||||
headerEnd,
|
||||
"",
|
||||
}, "\n"),
|
||||
},
|
||||
args: []string{"--ssh-option", "ForwardAgent=no"},
|
||||
matches: []match{
|
||||
{match: "Use new options?", write: "yes"},
|
||||
{match: "Continue?", write: "yes"},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
@@ -645,18 +527,14 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
)
|
||||
|
||||
// Prepare ssh config files.
|
||||
sshConfigName, coderConfigName := sshConfigFileNames(t)
|
||||
sshConfigName := sshConfigFileName(t)
|
||||
if tt.writeConfig.ssh != "" {
|
||||
sshConfigFileCreate(t, sshConfigName, strings.NewReader(tt.writeConfig.ssh))
|
||||
}
|
||||
if tt.writeConfig.coder != "" {
|
||||
sshConfigFileCreate(t, coderConfigName, strings.NewReader(tt.writeConfig.coder))
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"config-ssh",
|
||||
"--ssh-config-file", sshConfigName,
|
||||
"--test.ssh-coder-config-file", coderConfigName,
|
||||
}
|
||||
args = append(args, tt.args...)
|
||||
cmd, root := clitest.New(t, args...)
|
||||
@@ -685,10 +563,6 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
got := sshConfigFileRead(t, sshConfigName)
|
||||
assert.Equal(t, tt.wantConfig.ssh, got)
|
||||
}
|
||||
if !tt.wantConfig.coderKept {
|
||||
_, err := os.ReadFile(coderConfigName)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -778,7 +652,7 @@ func TestConfigSSH_Hostnames(t *testing.T) {
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
sshConfigFile, _ := sshConfigFileNames(t)
|
||||
sshConfigFile := sshConfigFileName(t)
|
||||
|
||||
cmd, root := clitest.New(t, "config-ssh", "--ssh-config-file", sshConfigFile)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
+1
-1
@@ -27,7 +27,7 @@ func create() *cobra.Command {
|
||||
Use: "create [name]",
|
||||
Short: "Create a workspace from a template",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+1
-1
@@ -28,7 +28,7 @@ func deleteWorkspace() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("WithParameter", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerD: true})
|
||||
|
||||
+102
@@ -0,0 +1,102 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
var featureColumns = []string{"Name", "Entitlement", "Enabled", "Limit", "Actual"}
|
||||
|
||||
func features() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Short: "List features",
|
||||
Use: "features",
|
||||
Aliases: []string{"feature"},
|
||||
}
|
||||
cmd.AddCommand(
|
||||
featuresList(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func featuresList() *cobra.Command {
|
||||
var (
|
||||
columns []string
|
||||
outputFormat string
|
||||
)
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Aliases: []string{"ls"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entitlements, err := client.Entitlements(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out := ""
|
||||
switch outputFormat {
|
||||
case "table", "":
|
||||
out, err = displayFeatures(columns, entitlements.Features)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("render table: %w", err)
|
||||
}
|
||||
case "json":
|
||||
outBytes, err := json.Marshal(entitlements)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshal features to JSON: %w", err)
|
||||
}
|
||||
|
||||
out = string(outBytes)
|
||||
default:
|
||||
return xerrors.Errorf(`unknown output format %q, only "table" and "json" are supported`, outputFormat)
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringArrayVarP(&columns, "column", "c", featureColumns,
|
||||
fmt.Sprintf("Specify a column to filter in the table. Available columns are: %s",
|
||||
strings.Join(featureColumns, ", ")))
|
||||
cmd.Flags().StringVarP(&outputFormat, "output", "o", "table", "Output format. Available formats are: table, json.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
type featureRow struct {
|
||||
Name string `table:"name"`
|
||||
Entitlement string `table:"entitlement"`
|
||||
Enabled bool `table:"enabled"`
|
||||
Limit *int64 `table:"limit"`
|
||||
Actual *int64 `table:"actual"`
|
||||
}
|
||||
|
||||
// displayFeatures will return a table displaying all features passed in.
|
||||
// filterColumns must be a subset of the feature fields and will determine which
|
||||
// columns to display
|
||||
func displayFeatures(filterColumns []string, features map[string]codersdk.Feature) (string, error) {
|
||||
rows := make([]featureRow, 0, len(features))
|
||||
for name, feat := range features {
|
||||
rows = append(rows, featureRow{
|
||||
Name: name,
|
||||
Entitlement: string(feat.Entitlement),
|
||||
Enabled: feat.Enabled,
|
||||
Limit: feat.Limit,
|
||||
Actual: feat.Actual,
|
||||
})
|
||||
}
|
||||
|
||||
return cliui.DisplayTable(rows, "name", filterColumns)
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func TestFeaturesList(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("Table", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
cmd, root := clitest.New(t, "features", "list")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t)
|
||||
cmd.SetIn(pty.Input())
|
||||
cmd.SetOut(pty.Output())
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
errC <- cmd.Execute()
|
||||
}()
|
||||
require.NoError(t, <-errC)
|
||||
pty.ExpectMatch("user_limit")
|
||||
pty.ExpectMatch("not_entitled")
|
||||
})
|
||||
t.Run("JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
coderdtest.CreateFirstUser(t, client)
|
||||
cmd, root := clitest.New(t, "features", "list", "-o", "json")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
cmd.SetOut(buf)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
<-doneChan
|
||||
|
||||
var entitlements codersdk.Entitlements
|
||||
err := json.Unmarshal(buf.Bytes(), &entitlements)
|
||||
require.NoError(t, err, "unmarshal JSON output")
|
||||
assert.Len(t, entitlements.Features, 2)
|
||||
assert.Empty(t, entitlements.Warnings)
|
||||
assert.Equal(t, codersdk.EntitlementNotEntitled,
|
||||
entitlements.Features[codersdk.FeatureUserLimit].Entitlement)
|
||||
assert.Equal(t, codersdk.EntitlementNotEntitled,
|
||||
entitlements.Features[codersdk.FeatureAuditLog].Entitlement)
|
||||
assert.False(t, entitlements.HasLicense)
|
||||
})
|
||||
}
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
func TestGitSSH(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("Dial", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerD: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
|
||||
+76
-48
@@ -5,7 +5,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
@@ -14,8 +13,55 @@ import (
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
type workspaceListRow struct {
|
||||
Workspace string `table:"workspace"`
|
||||
Template string `table:"template"`
|
||||
Status string `table:"status"`
|
||||
LastBuilt string `table:"last built"`
|
||||
Outdated bool `table:"outdated"`
|
||||
StartsAt string `table:"starts at"`
|
||||
StopsAfter string `table:"stops after"`
|
||||
}
|
||||
|
||||
func workspaceListRowFromWorkspace(now time.Time, usersByID map[uuid.UUID]codersdk.User, workspace codersdk.Workspace) workspaceListRow {
|
||||
status := codersdk.WorkspaceDisplayStatus(workspace.LatestBuild.Job.Status, workspace.LatestBuild.Transition)
|
||||
|
||||
lastBuilt := now.UTC().Sub(workspace.LatestBuild.Job.CreatedAt).Truncate(time.Second)
|
||||
autostartDisplay := "-"
|
||||
if !ptr.NilOrEmpty(workspace.AutostartSchedule) {
|
||||
if sched, err := schedule.Weekly(*workspace.AutostartSchedule); err == nil {
|
||||
autostartDisplay = fmt.Sprintf("%s %s (%s)", sched.Time(), sched.DaysOfWeek(), sched.Location())
|
||||
}
|
||||
}
|
||||
|
||||
autostopDisplay := "-"
|
||||
if !ptr.NilOrZero(workspace.TTLMillis) {
|
||||
dur := time.Duration(*workspace.TTLMillis) * time.Millisecond
|
||||
autostopDisplay = durationDisplay(dur)
|
||||
if !workspace.LatestBuild.Deadline.IsZero() && workspace.LatestBuild.Deadline.Time.After(now) && status == "Running" {
|
||||
remaining := time.Until(workspace.LatestBuild.Deadline.Time)
|
||||
autostopDisplay = fmt.Sprintf("%s (%s)", autostopDisplay, relative(remaining))
|
||||
}
|
||||
}
|
||||
|
||||
user := usersByID[workspace.OwnerID]
|
||||
return workspaceListRow{
|
||||
Workspace: user.Username + "/" + workspace.Name,
|
||||
Template: workspace.TemplateName,
|
||||
Status: status,
|
||||
LastBuilt: durationDisplay(lastBuilt),
|
||||
Outdated: workspace.Outdated,
|
||||
StartsAt: autostartDisplay,
|
||||
StopsAfter: autostopDisplay,
|
||||
}
|
||||
}
|
||||
|
||||
func list() *cobra.Command {
|
||||
var columns []string
|
||||
var (
|
||||
columns []string
|
||||
searchQuery string
|
||||
me bool
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Annotations: workspaceCommand,
|
||||
Use: "list",
|
||||
@@ -23,19 +69,29 @@ func list() *cobra.Command {
|
||||
Aliases: []string{"ls"},
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
workspaces, err := client.Workspaces(cmd.Context(), codersdk.WorkspaceFilter{})
|
||||
filter := codersdk.WorkspaceFilter{
|
||||
FilterQuery: searchQuery,
|
||||
}
|
||||
if me {
|
||||
myUser, err := client.User(cmd.Context(), codersdk.Me)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter.Owner = myUser.Username
|
||||
}
|
||||
workspaces, err := client.Workspaces(cmd.Context(), filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(workspaces) == 0 {
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), cliui.Styles.Prompt.String()+"No workspaces found! Create one:")
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout())
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), " "+cliui.Styles.Code.Render("coder create <name>"))
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout())
|
||||
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), cliui.Styles.Prompt.String()+"No workspaces found! Create one:")
|
||||
_, _ = fmt.Fprintln(cmd.ErrOrStderr())
|
||||
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), " "+cliui.Styles.Code.Render("coder create <name>"))
|
||||
_, _ = fmt.Fprintln(cmd.ErrOrStderr())
|
||||
return nil
|
||||
}
|
||||
users, err := client.Users(cmd.Context(), codersdk.UsersRequest{})
|
||||
@@ -47,52 +103,24 @@ func list() *cobra.Command {
|
||||
usersByID[user.ID] = user
|
||||
}
|
||||
|
||||
tableWriter := cliui.Table()
|
||||
header := table.Row{"workspace", "template", "status", "last built", "outdated", "starts at", "stops after"}
|
||||
tableWriter.AppendHeader(header)
|
||||
tableWriter.SortBy([]table.SortBy{{
|
||||
Name: "workspace",
|
||||
}})
|
||||
tableWriter.SetColumnConfigs(cliui.FilterTableColumns(header, columns))
|
||||
|
||||
now := time.Now()
|
||||
for _, workspace := range workspaces {
|
||||
status := codersdk.WorkspaceDisplayStatus(workspace.LatestBuild.Job.Status, workspace.LatestBuild.Transition)
|
||||
|
||||
lastBuilt := time.Now().UTC().Sub(workspace.LatestBuild.Job.CreatedAt).Truncate(time.Second)
|
||||
autostartDisplay := "-"
|
||||
if !ptr.NilOrEmpty(workspace.AutostartSchedule) {
|
||||
if sched, err := schedule.Weekly(*workspace.AutostartSchedule); err == nil {
|
||||
autostartDisplay = fmt.Sprintf("%s %s (%s)", sched.Time(), sched.DaysOfWeek(), sched.Location())
|
||||
}
|
||||
}
|
||||
|
||||
autostopDisplay := "-"
|
||||
if !ptr.NilOrZero(workspace.TTLMillis) {
|
||||
dur := time.Duration(*workspace.TTLMillis) * time.Millisecond
|
||||
autostopDisplay = durationDisplay(dur)
|
||||
if !workspace.LatestBuild.Deadline.IsZero() && workspace.LatestBuild.Deadline.After(now) && status == "Running" {
|
||||
remaining := time.Until(workspace.LatestBuild.Deadline)
|
||||
autostopDisplay = fmt.Sprintf("%s (%s)", autostopDisplay, relative(remaining))
|
||||
}
|
||||
}
|
||||
|
||||
user := usersByID[workspace.OwnerID]
|
||||
tableWriter.AppendRow(table.Row{
|
||||
user.Username + "/" + workspace.Name,
|
||||
workspace.TemplateName,
|
||||
status,
|
||||
durationDisplay(lastBuilt),
|
||||
workspace.Outdated,
|
||||
autostartDisplay,
|
||||
autostopDisplay,
|
||||
})
|
||||
displayWorkspaces := make([]workspaceListRow, len(workspaces))
|
||||
for i, workspace := range workspaces {
|
||||
displayWorkspaces[i] = workspaceListRowFromWorkspace(now, usersByID, workspace)
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), tableWriter.Render())
|
||||
|
||||
out, err := cliui.DisplayTable(displayWorkspaces, "workspace", columns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringArrayVarP(&columns, "column", "c", nil,
|
||||
"Specify a column to filter in the table.")
|
||||
cmd.Flags().StringVar(&searchQuery, "search", "", "Search for a workspace with a query.")
|
||||
cmd.Flags().BoolVar(&me, "me", false, "Only show workspaces owned by the current user.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
+1
-1
@@ -16,7 +16,7 @@ func logout() *cobra.Command {
|
||||
Use: "logout",
|
||||
Short: "Remove the local authenticated session",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,11 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
func parameters() *cobra.Command {
|
||||
@@ -30,29 +26,3 @@ func parameters() *cobra.Command {
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// displayParameters will return a table displaying all parameters passed in.
|
||||
// filterColumns must be a subset of the parameter fields and will determine which
|
||||
// columns to display
|
||||
func displayParameters(filterColumns []string, params ...codersdk.Parameter) string {
|
||||
tableWriter := cliui.Table()
|
||||
header := table.Row{"id", "scope", "scope id", "name", "source scheme", "destination scheme", "created at", "updated at"}
|
||||
tableWriter.AppendHeader(header)
|
||||
tableWriter.SetColumnConfigs(cliui.FilterTableColumns(header, filterColumns))
|
||||
tableWriter.SortBy([]table.SortBy{{
|
||||
Name: "name",
|
||||
}})
|
||||
for _, param := range params {
|
||||
tableWriter.AppendRow(table.Row{
|
||||
param.ID.String(),
|
||||
param.Scope,
|
||||
param.ScopeID.String(),
|
||||
param.Name,
|
||||
param.SourceScheme,
|
||||
param.DestinationScheme,
|
||||
param.CreatedAt,
|
||||
param.UpdatedAt,
|
||||
})
|
||||
}
|
||||
return tableWriter.Render()
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
@@ -21,7 +22,7 @@ func parameterList() *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
scope, name := args[0], args[1]
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -70,11 +71,16 @@ func parameterList() *cobra.Command {
|
||||
return xerrors.Errorf("fetch params: %w", err)
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), displayParameters(columns, params...))
|
||||
out, err := cliui.DisplayTable(params, "name", columns)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("render table: %w", err)
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringArrayVarP(&columns, "column", "c", []string{"name", "scope", "destination_scheme"},
|
||||
cmd.Flags().StringArrayVarP(&columns, "column", "c", []string{"name", "scope", "destination scheme"},
|
||||
"Specify a column to filter in the table.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
+13
-8
@@ -55,6 +55,9 @@ func portForward() *cobra.Command {
|
||||
},
|
||||
),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
specs, err := parsePortForwards(tcpForwards, udpForwards, unixForwards)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse port-forward specs: %w", err)
|
||||
@@ -67,12 +70,12 @@ func portForward() *cobra.Command {
|
||||
return xerrors.New("no port-forwards requested")
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
workspace, agent, err := getWorkspaceAndAgent(cmd, client, codersdk.Me, args[0], false)
|
||||
workspace, agent, err := getWorkspaceAndAgent(ctx, cmd, client, codersdk.Me, args[0], false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -80,13 +83,13 @@ func portForward() *cobra.Command {
|
||||
return xerrors.New("workspace must be in start transition to port-forward")
|
||||
}
|
||||
if workspace.LatestBuild.Job.CompletedAt == nil {
|
||||
err = cliui.WorkspaceBuild(cmd.Context(), cmd.ErrOrStderr(), client, workspace.LatestBuild.ID, workspace.CreatedAt)
|
||||
err = cliui.WorkspaceBuild(ctx, cmd.ErrOrStderr(), client, workspace.LatestBuild.ID, workspace.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = cliui.Agent(cmd.Context(), cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
err = cliui.Agent(ctx, cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
WorkspaceName: workspace.Name,
|
||||
Fetch: func(ctx context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
return client.WorkspaceAgent(ctx, agent.ID)
|
||||
@@ -96,7 +99,7 @@ func portForward() *cobra.Command {
|
||||
return xerrors.Errorf("await agent: %w", err)
|
||||
}
|
||||
|
||||
conn, err := client.DialWorkspaceAgent(cmd.Context(), agent.ID, nil)
|
||||
conn, err := client.DialWorkspaceAgent(ctx, agent.ID, nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("dial workspace agent: %w", err)
|
||||
}
|
||||
@@ -104,7 +107,6 @@ func portForward() *cobra.Command {
|
||||
|
||||
// Start all listeners.
|
||||
var (
|
||||
ctx, cancel = context.WithCancel(cmd.Context())
|
||||
wg = new(sync.WaitGroup)
|
||||
listeners = make([]net.Listener, len(specs))
|
||||
closeAllListeners = func() {
|
||||
@@ -116,11 +118,11 @@ func portForward() *cobra.Command {
|
||||
}
|
||||
}
|
||||
)
|
||||
defer cancel()
|
||||
defer closeAllListeners()
|
||||
|
||||
for i, spec := range specs {
|
||||
l, err := listenAndPortForward(ctx, cmd, conn, wg, spec)
|
||||
if err != nil {
|
||||
closeAllListeners()
|
||||
return err
|
||||
}
|
||||
listeners[i] = l
|
||||
@@ -129,7 +131,10 @@ func portForward() *cobra.Command {
|
||||
// Wait for the context to be canceled or for a signal and close
|
||||
// all listeners.
|
||||
var closeErr error
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
|
||||
+1
-1
@@ -20,7 +20,7 @@ func publickey() *cobra.Command {
|
||||
Aliases: []string{"pubkey"},
|
||||
Short: "Output your public key for Git operations",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create codersdk client: %w", err)
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
func TestPublicKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
cmd, root := clitest.New(t, "publickey")
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
func rename() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Annotations: workspaceCommand,
|
||||
Use: "rename <workspace> <new name>",
|
||||
Short: "Rename a workspace",
|
||||
Args: cobra.ExactArgs(2),
|
||||
// Keep hidden until renaming is safe, see:
|
||||
// * https://github.com/coder/coder/issues/3000
|
||||
// * https://github.com/coder/coder/issues/3386
|
||||
Hidden: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
workspace, err := namedWorkspace(cmd, client, args[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get workspace: %w", err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n",
|
||||
cliui.Styles.Wrap.Render("WARNING: A rename can result in data loss if a resource references the workspace name in the template (e.g volumes)."),
|
||||
)
|
||||
_, err = cliui.Prompt(cmd, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Type %q to confirm rename:", workspace.Name),
|
||||
Validate: func(s string) error {
|
||||
if s == workspace.Name {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("Input %q does not match %q", s, workspace.Name)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.UpdateWorkspace(cmd.Context(), workspace.ID, codersdk.UpdateWorkspaceRequest{
|
||||
Name: args[1],
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("rename workspace: %w", err)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cliui.AllowSkipPrompt(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestRename(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerD: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
want := workspace.Name + "-test"
|
||||
cmd, root := clitest.New(t, "rename", workspace.Name, want, "--yes")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t)
|
||||
cmd.SetIn(pty.Input())
|
||||
cmd.SetOut(pty.Output())
|
||||
|
||||
errC := make(chan error, 1)
|
||||
go func() {
|
||||
errC <- cmd.ExecuteContext(ctx)
|
||||
}()
|
||||
|
||||
pty.ExpectMatch("confirm rename:")
|
||||
pty.WriteLine(workspace.Name)
|
||||
|
||||
require.NoError(t, <-errC)
|
||||
|
||||
ws, err := client.Workspace(ctx, workspace.ID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
got := ws.Name
|
||||
assert.Equal(t, want, got, "workspace name did not change")
|
||||
}
|
||||
+43
-32
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/coder/coder/cli/cliflag"
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/cli/config"
|
||||
"github.com/coder/coder/coderd"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
@@ -58,7 +59,43 @@ func init() {
|
||||
cobra.AddTemplateFuncs(templateFunctions)
|
||||
}
|
||||
|
||||
func Root() *cobra.Command {
|
||||
func Core() []*cobra.Command {
|
||||
return []*cobra.Command{
|
||||
configSSH(),
|
||||
create(),
|
||||
deleteWorkspace(),
|
||||
dotfiles(),
|
||||
gitssh(),
|
||||
list(),
|
||||
login(),
|
||||
logout(),
|
||||
parameters(),
|
||||
portForward(),
|
||||
publickey(),
|
||||
resetPassword(),
|
||||
schedules(),
|
||||
show(),
|
||||
ssh(),
|
||||
start(),
|
||||
state(),
|
||||
stop(),
|
||||
rename(),
|
||||
templates(),
|
||||
update(),
|
||||
users(),
|
||||
versionCmd(),
|
||||
wireguardPortForward(),
|
||||
workspaceAgent(),
|
||||
features(),
|
||||
}
|
||||
}
|
||||
|
||||
func AGPL() []*cobra.Command {
|
||||
all := append(Core(), Server(coderd.New))
|
||||
return all
|
||||
}
|
||||
|
||||
func Root(subcommands []*cobra.Command) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "coder",
|
||||
SilenceErrors: true,
|
||||
@@ -78,7 +115,7 @@ func Root() *cobra.Command {
|
||||
return nil
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
// If the client is unauthenticated we can ignore the check.
|
||||
// The child commands should handle an unauthenticated client.
|
||||
if xerrors.Is(err, errUnauthenticated) {
|
||||
@@ -109,33 +146,7 @@ func Root() *cobra.Command {
|
||||
),
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
configSSH(),
|
||||
create(),
|
||||
deleteWorkspace(),
|
||||
dotfiles(),
|
||||
gitssh(),
|
||||
list(),
|
||||
login(),
|
||||
logout(),
|
||||
parameters(),
|
||||
portForward(),
|
||||
publickey(),
|
||||
resetPassword(),
|
||||
schedules(),
|
||||
server(),
|
||||
show(),
|
||||
ssh(),
|
||||
start(),
|
||||
state(),
|
||||
stop(),
|
||||
templates(),
|
||||
update(),
|
||||
users(),
|
||||
versionCmd(),
|
||||
wireguardPortForward(),
|
||||
workspaceAgent(),
|
||||
)
|
||||
cmd.AddCommand(subcommands...)
|
||||
|
||||
cmd.SetUsageTemplate(usageTemplate())
|
||||
|
||||
@@ -180,9 +191,9 @@ func isTest() bool {
|
||||
return flag.Lookup("test.v") != nil
|
||||
}
|
||||
|
||||
// createClient returns a new client from the command context.
|
||||
// CreateClient returns a new client from the command context.
|
||||
// It reads from global configuration files if flags are not set.
|
||||
func createClient(cmd *cobra.Command) (*codersdk.Client, error) {
|
||||
func CreateClient(cmd *cobra.Command) (*codersdk.Client, error) {
|
||||
root := createConfig(cmd)
|
||||
rawURL, err := cmd.Flags().GetString(varURL)
|
||||
if err != nil || rawURL == "" {
|
||||
@@ -216,7 +227,7 @@ func createClient(cmd *cobra.Command) (*codersdk.Client, error) {
|
||||
}
|
||||
|
||||
// createAgentClient returns a new client from the command context.
|
||||
// It works just like createClient, but uses the agent token and URL instead.
|
||||
// It works just like CreateClient, but uses the agent token and URL instead.
|
||||
func createAgentClient(cmd *cobra.Command) (*codersdk.Client, error) {
|
||||
rawURL, err := cmd.Flags().GetString(varAgentURL)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
)
|
||||
|
||||
func Test_formatExamples(t *testing.T) {
|
||||
@@ -67,7 +67,11 @@ func Test_formatExamples(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Replace with goleak.VerifyTestMain(m) when we enable goleak.
|
||||
os.Exit(m.Run())
|
||||
// goleak.VerifyTestMain(m)
|
||||
goleak.VerifyTestMain(m,
|
||||
// The lumberjack library is used by by agent and seems to leave
|
||||
// goroutines after Close(), fails TestGitSSH tests.
|
||||
// https://github.com/natefinch/lumberjack/pull/100
|
||||
goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"),
|
||||
goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).mill.func1"),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestRoot(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("FormatCobraError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
+6
-6
@@ -77,7 +77,7 @@ func scheduleShow() *cobra.Command {
|
||||
Long: scheduleShowDescriptionLong,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func scheduleStart() *cobra.Command {
|
||||
Long: scheduleStartDescriptionLong,
|
||||
Args: cobra.RangeArgs(2, 4),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -156,7 +156,7 @@ func scheduleStop() *cobra.Command {
|
||||
Short: "Edit workspace stop schedule",
|
||||
Long: scheduleStopDescriptionLong,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -207,7 +207,7 @@ func scheduleOverride() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create client: %w", err)
|
||||
}
|
||||
@@ -280,8 +280,8 @@ func displaySchedule(workspace codersdk.Workspace, out io.Writer) error {
|
||||
if workspace.LatestBuild.Transition != "start" {
|
||||
schedNextStop = "-"
|
||||
} else {
|
||||
schedNextStop = workspace.LatestBuild.Deadline.In(loc).Format(timeFormat + " on " + dateFormat)
|
||||
schedNextStop = fmt.Sprintf("%s (in %s)", schedNextStop, durationDisplay(time.Until(workspace.LatestBuild.Deadline)))
|
||||
schedNextStop = workspace.LatestBuild.Deadline.Time.In(loc).Format(timeFormat + " on " + dateFormat)
|
||||
schedNextStop = fmt.Sprintf("%s (in %s)", schedNextStop, durationDisplay(time.Until(workspace.LatestBuild.Deadline.Time)))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -239,7 +239,7 @@ func TestScheduleOverride(t *testing.T) {
|
||||
|
||||
// Assert test invariant: workspace build has a deadline set equal to now plus ttl
|
||||
initDeadline := time.Now().Add(time.Duration(*workspace.TTLMillis) * time.Millisecond)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline, time.Minute)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline.Time, time.Minute)
|
||||
|
||||
cmd, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -252,7 +252,7 @@ func TestScheduleOverride(t *testing.T) {
|
||||
// Then: the deadline of the latest build is updated assuming the units are minutes
|
||||
updated, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, expectedDeadline, updated.LatestBuild.Deadline, time.Minute)
|
||||
require.WithinDuration(t, expectedDeadline, updated.LatestBuild.Deadline.Time, time.Minute)
|
||||
})
|
||||
|
||||
t.Run("InvalidDuration", func(t *testing.T) {
|
||||
@@ -279,7 +279,7 @@ func TestScheduleOverride(t *testing.T) {
|
||||
|
||||
// Assert test invariant: workspace build has a deadline set equal to now plus ttl
|
||||
initDeadline := time.Now().Add(time.Duration(*workspace.TTLMillis) * time.Millisecond)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline, time.Minute)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline.Time, time.Minute)
|
||||
|
||||
cmd, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
+143
-39
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/pion/turn/v2"
|
||||
"github.com/pion/webrtc/v3"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -53,6 +54,7 @@ import (
|
||||
"github.com/coder/coder/coderd/database/databasefake"
|
||||
"github.com/coder/coder/coderd/devtunnel"
|
||||
"github.com/coder/coder/coderd/gitsshkey"
|
||||
"github.com/coder/coder/coderd/prometheusmetrics"
|
||||
"github.com/coder/coder/coderd/telemetry"
|
||||
"github.com/coder/coder/coderd/tracing"
|
||||
"github.com/coder/coder/coderd/turnconn"
|
||||
@@ -66,7 +68,7 @@ import (
|
||||
)
|
||||
|
||||
// nolint:gocyclo
|
||||
func server() *cobra.Command {
|
||||
func Server(newAPI func(*coderd.Options) *coderd.API) *cobra.Command {
|
||||
var (
|
||||
accessURL string
|
||||
address string
|
||||
@@ -85,6 +87,7 @@ func server() *cobra.Command {
|
||||
oauth2GithubAllowedOrganizations []string
|
||||
oauth2GithubAllowedTeams []string
|
||||
oauth2GithubAllowSignups bool
|
||||
oauth2GithubEnterpriseBaseURL string
|
||||
oidcAllowSignups bool
|
||||
oidcClientID string
|
||||
oidcClientSecret string
|
||||
@@ -105,6 +108,7 @@ func server() *cobra.Command {
|
||||
trace bool
|
||||
secureAuthCookie bool
|
||||
sshKeygenAlgorithmRaw string
|
||||
autoImportTemplates []string
|
||||
spooky bool
|
||||
verbose bool
|
||||
)
|
||||
@@ -124,6 +128,19 @@ func server() *cobra.Command {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
// Register signals early on so that graceful shutdown can't
|
||||
// be interrupted by additional signals. Note that we avoid
|
||||
// shadowing cancel() (from above) here because notifyStop()
|
||||
// restores default behavior for the signals. This protects
|
||||
// the shutdown sequence from abrubtly terminating things
|
||||
// like: database migrations, provisioner work, workspace
|
||||
// cleanup in dev-mode, etc.
|
||||
//
|
||||
// To get out of a graceful shutdown, the user can send
|
||||
// SIGQUIT with ctrl+\ or SIGKILL with `kill -9`.
|
||||
notifyCtx, notifyStop := signal.NotifyContext(ctx, interruptSignals...)
|
||||
defer notifyStop()
|
||||
|
||||
// Clean up idle connections at the end, e.g.
|
||||
// embedded-postgres can leave an idle connection
|
||||
// which is caught by goleaks.
|
||||
@@ -268,6 +285,28 @@ func server() *cobra.Command {
|
||||
URLs: []string{stunServer},
|
||||
})
|
||||
}
|
||||
|
||||
// Validate provided auto-import templates.
|
||||
var (
|
||||
validatedAutoImportTemplates = make([]coderd.AutoImportTemplate, len(autoImportTemplates))
|
||||
seenValidatedAutoImportTemplates = make(map[coderd.AutoImportTemplate]struct{}, len(autoImportTemplates))
|
||||
)
|
||||
for i, autoImportTemplate := range autoImportTemplates {
|
||||
var v coderd.AutoImportTemplate
|
||||
switch autoImportTemplate {
|
||||
case "kubernetes":
|
||||
v = coderd.AutoImportTemplateKubernetes
|
||||
default:
|
||||
return xerrors.Errorf("auto import template %q is not supported", autoImportTemplate)
|
||||
}
|
||||
|
||||
if _, ok := seenValidatedAutoImportTemplates[v]; ok {
|
||||
return xerrors.Errorf("auto import template %q is specified more than once", v)
|
||||
}
|
||||
seenValidatedAutoImportTemplates[v] = struct{}{}
|
||||
validatedAutoImportTemplates[i] = v
|
||||
}
|
||||
|
||||
options := &coderd.Options{
|
||||
AccessURL: accessURLParsed,
|
||||
ICEServers: iceServers,
|
||||
@@ -281,10 +320,11 @@ func server() *cobra.Command {
|
||||
TURNServer: turnServer,
|
||||
TracerProvider: tracerProvider,
|
||||
Telemetry: telemetry.NewNoop(),
|
||||
AutoImportTemplates: validatedAutoImportTemplates,
|
||||
}
|
||||
|
||||
if oauth2GithubClientSecret != "" {
|
||||
options.GithubOAuth2Config, err = configureGithubOAuth2(accessURLParsed, oauth2GithubClientID, oauth2GithubClientSecret, oauth2GithubAllowSignups, oauth2GithubAllowedOrganizations, oauth2GithubAllowedTeams)
|
||||
options.GithubOAuth2Config, err = configureGithubOAuth2(accessURLParsed, oauth2GithubClientID, oauth2GithubClientSecret, oauth2GithubAllowSignups, oauth2GithubAllowedOrganizations, oauth2GithubAllowedTeams, oauth2GithubEnterpriseBaseURL)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("configure github oauth2: %w", err)
|
||||
}
|
||||
@@ -392,7 +432,33 @@ func server() *cobra.Command {
|
||||
defer options.Telemetry.Close()
|
||||
}
|
||||
|
||||
coderAPI := coderd.New(options)
|
||||
// This prevents the pprof import from being accidentally deleted.
|
||||
_ = pprof.Handler
|
||||
if pprofEnabled {
|
||||
//nolint:revive
|
||||
defer serveHandler(ctx, logger, nil, pprofAddress, "pprof")()
|
||||
}
|
||||
if promEnabled {
|
||||
options.PrometheusRegistry = prometheus.NewRegistry()
|
||||
closeUsersFunc, err := prometheusmetrics.ActiveUsers(ctx, options.PrometheusRegistry, options.Database, 0)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("register active users prometheus metric: %w", err)
|
||||
}
|
||||
defer closeUsersFunc()
|
||||
|
||||
closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.PrometheusRegistry, options.Database, 0)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("register workspaces prometheus metric: %w", err)
|
||||
}
|
||||
defer closeWorkspacesFunc()
|
||||
|
||||
//nolint:revive
|
||||
defer serveHandler(ctx, logger, promhttp.InstrumentMetricHandler(
|
||||
options.PrometheusRegistry, promhttp.HandlerFor(options.PrometheusRegistry, promhttp.HandlerOpts{}),
|
||||
), promAddress, "prometheus")()
|
||||
}
|
||||
|
||||
coderAPI := newAPI(options)
|
||||
defer coderAPI.Close()
|
||||
|
||||
client := codersdk.New(localURL)
|
||||
@@ -406,17 +472,6 @@ func server() *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
// This prevents the pprof import from being accidentally deleted.
|
||||
_ = pprof.Handler
|
||||
if pprofEnabled {
|
||||
//nolint:revive
|
||||
defer serveHandler(ctx, logger, nil, pprofAddress, "pprof")()
|
||||
}
|
||||
if promEnabled {
|
||||
//nolint:revive
|
||||
defer serveHandler(ctx, logger, promhttp.Handler(), promAddress, "prometheus")()
|
||||
}
|
||||
|
||||
// Since errCh only has one buffered slot, all routines
|
||||
// sending on it must be wrapped in a select/default to
|
||||
// avoid leaving dangling goroutines waiting for the
|
||||
@@ -441,6 +496,11 @@ func server() *cobra.Command {
|
||||
|
||||
shutdownConnsCtx, shutdownConns := context.WithCancel(ctx)
|
||||
defer shutdownConns()
|
||||
|
||||
// ReadHeaderTimeout is purposefully not enabled. It caused some issues with
|
||||
// websockets over the dev tunnel.
|
||||
// See: https://github.com/coder/coder/pull/3730
|
||||
//nolint:gosec
|
||||
server := &http.Server{
|
||||
// These errors are typically noise like "TLS: EOF". Vault does similar:
|
||||
// https://github.com/hashicorp/vault/blob/e2490059d0711635e529a4efcbaa1b26998d6e1c/command/server.go#L2714
|
||||
@@ -503,22 +563,13 @@ func server() *cobra.Command {
|
||||
// such as via the systemd service.
|
||||
_ = config.URL().Write(client.URL.String())
|
||||
|
||||
// Because the graceful shutdown includes cleaning up workspaces in dev mode, we're
|
||||
// going to make it harder to accidentally skip the graceful shutdown by hitting ctrl+c
|
||||
// two or more times. So the stopChan is unlimited in size and we don't call
|
||||
// signal.Stop() until graceful shutdown finished--this means we swallow additional
|
||||
// SIGINT after the first. To get out of a graceful shutdown, the user can send SIGQUIT
|
||||
// with ctrl+\ or SIGTERM with `kill`.
|
||||
ctx, stop := signal.NotifyContext(ctx, os.Interrupt)
|
||||
defer stop()
|
||||
|
||||
// Currently there is no way to ask the server to shut
|
||||
// itself down, so any exit signal will result in a non-zero
|
||||
// exit of the server.
|
||||
var exitErr error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
exitErr = ctx.Err()
|
||||
case <-notifyCtx.Done():
|
||||
exitErr = notifyCtx.Err()
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), cliui.Styles.Bold.Render(
|
||||
"Interrupt caught, gracefully exiting. Use ctrl+\\ to force quit",
|
||||
))
|
||||
@@ -612,13 +663,13 @@ func server() *cobra.Command {
|
||||
root.AddCommand(&cobra.Command{
|
||||
Use: "postgres-builtin-url",
|
||||
Short: "Output the connection URL for the built-in PostgreSQL deployment.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
cfg := createConfig(cmd)
|
||||
url, err := embeddedPostgresURL(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmd.Println(cliui.Styles.Code.Render("psql \"" + url + "\""))
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), "psql %q\n", url)
|
||||
return nil
|
||||
},
|
||||
})
|
||||
@@ -678,6 +729,8 @@ func server() *cobra.Command {
|
||||
"Specifies teams inside organizations the user must be a member of to authenticate with GitHub. Formatted as: <organization-name>/<team-slug>.")
|
||||
cliflag.BoolVarP(root.Flags(), &oauth2GithubAllowSignups, "oauth2-github-allow-signups", "", "CODER_OAUTH2_GITHUB_ALLOW_SIGNUPS", false,
|
||||
"Specifies whether new users can sign up with GitHub.")
|
||||
cliflag.StringVarP(root.Flags(), &oauth2GithubEnterpriseBaseURL, "oauth2-github-enterprise-base-url", "", "CODER_OAUTH2_GITHUB_ENTERPRISE_BASE_URL", "",
|
||||
"Specifies the base URL of a GitHub Enterprise instance to use for oauth2.")
|
||||
cliflag.BoolVarP(root.Flags(), &oidcAllowSignups, "oidc-allow-signups", "", "CODER_OIDC_ALLOW_SIGNUPS", true,
|
||||
"Specifies whether new users can sign up with OIDC.")
|
||||
cliflag.StringVarP(root.Flags(), &oidcClientID, "oidc-client-id", "", "CODER_OIDC_CLIENT_ID", "",
|
||||
@@ -719,6 +772,7 @@ func server() *cobra.Command {
|
||||
cliflag.BoolVarP(root.Flags(), &secureAuthCookie, "secure-auth-cookie", "", "CODER_SECURE_AUTH_COOKIE", false, "Specifies if the 'Secure' property is set on browser session cookies")
|
||||
cliflag.StringVarP(root.Flags(), &sshKeygenAlgorithmRaw, "ssh-keygen-algorithm", "", "CODER_SSH_KEYGEN_ALGORITHM", "ed25519", "Specifies the algorithm to use for generating ssh keys. "+
|
||||
`Accepted values are "ed25519", "ecdsa", or "rsa4096"`)
|
||||
cliflag.StringArrayVarP(root.Flags(), &autoImportTemplates, "auto-import-template", "", "CODER_TEMPLATE_AUTOIMPORT", []string{}, "Which templates to auto-import. Available auto-importable templates are: kubernetes")
|
||||
cliflag.BoolVarP(root.Flags(), &spooky, "spooky", "", "", false, "Specifies spookiness level")
|
||||
cliflag.BoolVarP(root.Flags(), &verbose, "verbose", "v", "CODER_VERBOSE", false, "Enables verbose logging.")
|
||||
_ = root.Flags().MarkHidden("spooky")
|
||||
@@ -861,16 +915,16 @@ func newProvisionerDaemon(ctx context.Context, coderAPI *coderd.API,
|
||||
// nolint: revive
|
||||
func printLogo(cmd *cobra.Command, spooky bool) {
|
||||
if spooky {
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), `▄████▄ ▒█████ ▓█████▄ ▓█████ ██▀███
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), `▄████▄ ▒█████ ▓█████▄ ▓█████ ██▀███
|
||||
▒██▀ ▀█ ▒██▒ ██▒▒██▀ ██▌▓█ ▀ ▓██ ▒ ██▒
|
||||
▒▓█ ▄ ▒██░ ██▒░██ █▌▒███ ▓██ ░▄█ ▒
|
||||
▒▓▓▄ ▄██▒▒██ ██░░▓█▄ ▌▒▓█ ▄ ▒██▀▀█▄
|
||||
▒▓▓▄ ▄██▒▒██ ██░░▓█▄ ▌▒▓█ ▄ ▒██▀▀█▄
|
||||
▒ ▓███▀ ░░ ████▓▒░░▒████▓ ░▒████▒░██▓ ▒██▒
|
||||
░ ░▒ ▒ ░░ ▒░▒░▒░ ▒▒▓ ▒ ░░ ▒░ ░░ ▒▓ ░▒▓░
|
||||
░ ▒ ░ ▒ ▒░ ░ ▒ ▒ ░ ░ ░ ░▒ ░ ▒░
|
||||
░ ░ ░ ░ ▒ ░ ░ ░ ░ ░░ ░
|
||||
░ ░ ░ ░ ░ ░ ░ ░
|
||||
░ ░
|
||||
░ ░ ░ ░ ▒ ░ ░ ░ ░ ░░ ░
|
||||
░ ░ ░ ░ ░ ░ ░ ░
|
||||
░ ░
|
||||
`)
|
||||
return
|
||||
}
|
||||
@@ -955,7 +1009,7 @@ func configureTLS(listener net.Listener, tlsMinVersion, tlsClientAuth, tlsCertFi
|
||||
return tls.NewListener(listener, tlsConfig), nil
|
||||
}
|
||||
|
||||
func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, allowSignups bool, allowOrgs []string, rawTeams []string) (*coderd.GithubOAuth2Config, error) {
|
||||
func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, allowSignups bool, allowOrgs []string, rawTeams []string, enterpriseBaseURL string) (*coderd.GithubOAuth2Config, error) {
|
||||
redirectURL, err := accessURL.Parse("/api/v2/users/oauth2/github/callback")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse github oauth callback url: %w", err)
|
||||
@@ -971,11 +1025,38 @@ func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, al
|
||||
Slug: parts[1],
|
||||
})
|
||||
}
|
||||
createClient := func(client *http.Client) (*github.Client, error) {
|
||||
if enterpriseBaseURL != "" {
|
||||
return github.NewEnterpriseClient(enterpriseBaseURL, "", client)
|
||||
}
|
||||
return github.NewClient(client), nil
|
||||
}
|
||||
|
||||
endpoint := xgithub.Endpoint
|
||||
if enterpriseBaseURL != "" {
|
||||
enterpriseURL, err := url.Parse(enterpriseBaseURL)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse enterprise base url: %w", err)
|
||||
}
|
||||
authURL, err := enterpriseURL.Parse("/login/oauth/authorize")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse enterprise auth url: %w", err)
|
||||
}
|
||||
tokenURL, err := enterpriseURL.Parse("/login/oauth/access_token")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse enterprise token url: %w", err)
|
||||
}
|
||||
endpoint = oauth2.Endpoint{
|
||||
AuthURL: authURL.String(),
|
||||
TokenURL: tokenURL.String(),
|
||||
}
|
||||
}
|
||||
|
||||
return &coderd.GithubOAuth2Config{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
Endpoint: xgithub.Endpoint,
|
||||
Endpoint: endpoint,
|
||||
RedirectURL: redirectURL.String(),
|
||||
Scopes: []string{
|
||||
"read:user",
|
||||
@@ -987,15 +1068,27 @@ func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, al
|
||||
AllowOrganizations: allowOrgs,
|
||||
AllowTeams: allowTeams,
|
||||
AuthenticatedUser: func(ctx context.Context, client *http.Client) (*github.User, error) {
|
||||
user, _, err := github.NewClient(client).Users.Get(ctx, "")
|
||||
api, err := createClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user, _, err := api.Users.Get(ctx, "")
|
||||
return user, err
|
||||
},
|
||||
ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) {
|
||||
emails, _, err := github.NewClient(client).Users.ListEmails(ctx, &github.ListOptions{})
|
||||
api, err := createClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
emails, _, err := api.Users.ListEmails(ctx, &github.ListOptions{})
|
||||
return emails, err
|
||||
},
|
||||
ListOrganizationMemberships: func(ctx context.Context, client *http.Client) ([]*github.Membership, error) {
|
||||
memberships, _, err := github.NewClient(client).Organizations.ListOrgMemberships(ctx, &github.ListOrgMembershipsOptions{
|
||||
api, err := createClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
memberships, _, err := api.Organizations.ListOrgMemberships(ctx, &github.ListOrgMembershipsOptions{
|
||||
State: "active",
|
||||
ListOptions: github.ListOptions{
|
||||
PerPage: 100,
|
||||
@@ -1004,7 +1097,11 @@ func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, al
|
||||
return memberships, err
|
||||
},
|
||||
TeamMembership: func(ctx context.Context, client *http.Client, org, teamSlug, username string) (*github.Membership, error) {
|
||||
team, _, err := github.NewClient(client).Teams.GetTeamMembershipBySlug(ctx, org, teamSlug, username)
|
||||
api, err := createClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
team, _, err := api.Teams.GetTeamMembershipBySlug(ctx, org, teamSlug, username)
|
||||
return team, err
|
||||
},
|
||||
}, nil
|
||||
@@ -1013,7 +1110,14 @@ func configureGithubOAuth2(accessURL *url.URL, clientID, clientSecret string, al
|
||||
func serveHandler(ctx context.Context, logger slog.Logger, handler http.Handler, addr, name string) (closeFunc func()) {
|
||||
logger.Debug(ctx, "http server listening", slog.F("addr", addr), slog.F("name", name))
|
||||
|
||||
srv := &http.Server{Addr: addr, Handler: handler}
|
||||
// ReadHeaderTimeout is purposefully not enabled. It caused some issues with
|
||||
// websockets over the dev tunnel.
|
||||
// See: https://github.com/coder/coder/pull/3730
|
||||
//nolint:gosec
|
||||
srv := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: handler,
|
||||
}
|
||||
go func() {
|
||||
err := srv.ListenAndServe()
|
||||
if err != nil && !xerrors.Is(err, http.ErrServerClosed) {
|
||||
|
||||
+100
-1
@@ -1,6 +1,7 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -17,10 +19,13 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
|
||||
@@ -34,7 +39,7 @@ import (
|
||||
)
|
||||
|
||||
// This cannot be ran in parallel because it uses a signal.
|
||||
// nolint:paralleltest
|
||||
// nolint:tparallel,paralleltest
|
||||
func TestServer(t *testing.T) {
|
||||
t.Run("Production", func(t *testing.T) {
|
||||
if runtime.GOOS != "linux" || testing.Short() {
|
||||
@@ -374,6 +379,100 @@ func TestServer(t *testing.T) {
|
||||
cancelFunc()
|
||||
<-errC
|
||||
})
|
||||
t.Run("Prometheus", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
|
||||
random, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
_ = random.Close()
|
||||
tcpAddr, valid := random.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
randomPort := tcpAddr.Port
|
||||
|
||||
root, cfg := clitest.New(t,
|
||||
"server",
|
||||
"--in-memory",
|
||||
"--address", ":0",
|
||||
"--provisioner-daemons", "1",
|
||||
"--prometheus-enable",
|
||||
"--prometheus-address", ":"+strconv.Itoa(randomPort),
|
||||
"--cache-dir", t.TempDir(),
|
||||
)
|
||||
serverErr := make(chan error, 1)
|
||||
go func() {
|
||||
serverErr <- root.ExecuteContext(ctx)
|
||||
}()
|
||||
_ = waitAccessURL(t, cfg)
|
||||
|
||||
var res *http.Response
|
||||
require.Eventually(t, func() bool {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://127.0.0.1:%d", randomPort), nil)
|
||||
assert.NoError(t, err)
|
||||
// nolint:bodyclose
|
||||
res, err = http.DefaultClient.Do(req)
|
||||
return err == nil
|
||||
}, testutil.WaitShort, testutil.IntervalFast)
|
||||
|
||||
scanner := bufio.NewScanner(res.Body)
|
||||
hasActiveUsers := false
|
||||
hasWorkspaces := false
|
||||
for scanner.Scan() {
|
||||
// This metric is manually registered to be tracked in the server. That's
|
||||
// why we test it's tracked here.
|
||||
if strings.HasPrefix(scanner.Text(), "coderd_api_active_users_duration_hour") {
|
||||
hasActiveUsers = true
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(scanner.Text(), "coderd_api_workspace_latest_build_total") {
|
||||
hasWorkspaces = true
|
||||
continue
|
||||
}
|
||||
t.Logf("scanned %s", scanner.Text())
|
||||
}
|
||||
require.NoError(t, scanner.Err())
|
||||
require.True(t, hasActiveUsers)
|
||||
require.True(t, hasWorkspaces)
|
||||
cancelFunc()
|
||||
<-serverErr
|
||||
})
|
||||
t.Run("GitHubOAuth", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
|
||||
fakeRedirect := "https://fake-url.com"
|
||||
root, cfg := clitest.New(t,
|
||||
"server",
|
||||
"--in-memory",
|
||||
"--address", ":0",
|
||||
"--oauth2-github-client-id", "fake",
|
||||
"--oauth2-github-client-secret", "fake",
|
||||
"--oauth2-github-enterprise-base-url", fakeRedirect,
|
||||
)
|
||||
serverErr := make(chan error, 1)
|
||||
go func() {
|
||||
serverErr <- root.ExecuteContext(ctx)
|
||||
}()
|
||||
accessURL := waitAccessURL(t, cfg)
|
||||
client := codersdk.New(accessURL)
|
||||
client.HTTPClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
githubURL, err := accessURL.Parse("/api/v2/users/oauth2/github")
|
||||
require.NoError(t, err)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, githubURL.String(), nil)
|
||||
require.NoError(t, err)
|
||||
res, err := client.HTTPClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
defer res.Body.Close()
|
||||
fakeURL, err := res.Location()
|
||||
require.NoError(t, err)
|
||||
require.True(t, strings.HasPrefix(fakeURL.String(), fakeRedirect), fakeURL.String())
|
||||
cancelFunc()
|
||||
<-serverErr
|
||||
})
|
||||
}
|
||||
|
||||
func generateTLSCertificate(t testing.TB) (certPath, keyPath string) {
|
||||
|
||||
+1
-1
@@ -14,7 +14,7 @@ func show() *cobra.Command {
|
||||
Short: "Show details of a workspace's resources and agents",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
//go:build !windows
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var interruptSignals = []os.Signal{
|
||||
os.Interrupt,
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGHUP,
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
//go:build windows
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
var interruptSignals = []os.Signal{os.Interrupt}
|
||||
+55
-39
@@ -33,8 +33,10 @@ import (
|
||||
"github.com/coder/coder/peer/peerwg"
|
||||
)
|
||||
|
||||
var workspacePollInterval = time.Minute
|
||||
var autostopNotifyCountdown = []time.Duration{30 * time.Minute}
|
||||
var (
|
||||
workspacePollInterval = time.Minute
|
||||
autostopNotifyCountdown = []time.Duration{30 * time.Minute}
|
||||
)
|
||||
|
||||
func ssh() *cobra.Command {
|
||||
var (
|
||||
@@ -51,7 +53,10 @@ func ssh() *cobra.Command {
|
||||
Short: "SSH into a workspace",
|
||||
Args: cobra.ArbitraryArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -68,14 +73,14 @@ func ssh() *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(cmd, client, codersdk.Me, args[0], shuffle)
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, cmd, client, codersdk.Me, args[0], shuffle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// OpenSSH passes stderr directly to the calling TTY.
|
||||
// This is required in "stdio" mode so a connecting indicator can be displayed.
|
||||
err = cliui.Agent(cmd.Context(), cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
err = cliui.Agent(ctx, cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
WorkspaceName: workspace.Name,
|
||||
Fetch: func(ctx context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
return client.WorkspaceAgent(ctx, workspaceAgent.ID)
|
||||
@@ -85,19 +90,16 @@ func ssh() *cobra.Command {
|
||||
return xerrors.Errorf("await agent: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
sshClient *gossh.Client
|
||||
sshSession *gossh.Session
|
||||
)
|
||||
var newSSHClient func() (*gossh.Client, error)
|
||||
|
||||
if !wireguard {
|
||||
conn, err := client.DialWorkspaceAgent(cmd.Context(), workspaceAgent.ID, nil)
|
||||
conn, err := client.DialWorkspaceAgent(ctx, workspaceAgent.ID, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
stopPolling := tryPollWorkspaceAutostop(cmd.Context(), client, workspace)
|
||||
stopPolling := tryPollWorkspaceAutostop(ctx, client, workspace)
|
||||
defer stopPolling()
|
||||
|
||||
if stdio {
|
||||
@@ -105,6 +107,8 @@ func ssh() *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rawSSH.Close()
|
||||
|
||||
go func() {
|
||||
_, _ = io.Copy(cmd.OutOrStdout(), rawSSH)
|
||||
}()
|
||||
@@ -112,15 +116,7 @@ func ssh() *cobra.Command {
|
||||
return nil
|
||||
}
|
||||
|
||||
sshClient, err = conn.SSHClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sshSession, err = sshClient.NewSession()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newSSHClient = conn.SSHClient
|
||||
} else {
|
||||
// TODO: more granual control of Tailscale logging.
|
||||
peerwg.Logf = tslogger.Discard
|
||||
@@ -133,8 +129,9 @@ func ssh() *cobra.Command {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create wireguard network: %w", err)
|
||||
}
|
||||
defer wgn.Close()
|
||||
|
||||
err = client.PostWireguardPeer(cmd.Context(), workspace.ID, peerwg.Handshake{
|
||||
err = client.PostWireguardPeer(ctx, workspace.ID, peerwg.Handshake{
|
||||
Recipient: workspaceAgent.ID,
|
||||
NodePublicKey: wgn.NodePrivateKey.Public(),
|
||||
DiscoPublicKey: wgn.DiscoPublicKey,
|
||||
@@ -155,10 +152,11 @@ func ssh() *cobra.Command {
|
||||
}
|
||||
|
||||
if stdio {
|
||||
rawSSH, err := wgn.SSH(cmd.Context(), workspaceAgent.IPv6.IP())
|
||||
rawSSH, err := wgn.SSH(ctx, workspaceAgent.IPv6.IP())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rawSSH.Close()
|
||||
|
||||
go func() {
|
||||
_, _ = io.Copy(cmd.OutOrStdout(), rawSSH)
|
||||
@@ -167,17 +165,30 @@ func ssh() *cobra.Command {
|
||||
return nil
|
||||
}
|
||||
|
||||
sshClient, err = wgn.SSHClient(cmd.Context(), workspaceAgent.IPv6.IP())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sshSession, err = sshClient.NewSession()
|
||||
if err != nil {
|
||||
return err
|
||||
newSSHClient = func() (*gossh.Client, error) {
|
||||
return wgn.SSHClient(ctx, workspaceAgent.IPv6.IP())
|
||||
}
|
||||
}
|
||||
|
||||
sshClient, err := newSSHClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sshClient.Close()
|
||||
|
||||
sshSession, err := sshClient.NewSession()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sshSession.Close()
|
||||
|
||||
// Ensure context cancellation is propagated to the
|
||||
// SSH session, e.g. to cancel `Wait()` at the end.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
_ = sshSession.Close()
|
||||
}()
|
||||
|
||||
if identityAgent == "" {
|
||||
identityAgent = os.Getenv("SSH_AUTH_SOCK")
|
||||
}
|
||||
@@ -203,15 +214,18 @@ func ssh() *cobra.Command {
|
||||
_ = term.Restore(int(stdinFile.Fd()), state)
|
||||
}()
|
||||
|
||||
windowChange := listenWindowSize(cmd.Context())
|
||||
windowChange := listenWindowSize(ctx)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-windowChange:
|
||||
}
|
||||
width, height, _ := term.GetSize(int(stdoutFile.Fd()))
|
||||
width, height, err := term.GetSize(int(stdoutFile.Fd()))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
_ = sshSession.WindowChange(height, width)
|
||||
}
|
||||
}()
|
||||
@@ -224,13 +238,17 @@ func ssh() *cobra.Command {
|
||||
|
||||
sshSession.Stdin = cmd.InOrStdin()
|
||||
sshSession.Stdout = cmd.OutOrStdout()
|
||||
sshSession.Stderr = cmd.OutOrStdout()
|
||||
sshSession.Stderr = cmd.ErrOrStderr()
|
||||
|
||||
err = sshSession.Shell()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Put cancel at the top of the defer stack to initiate
|
||||
// shutdown of services.
|
||||
defer cancel()
|
||||
|
||||
err = sshSession.Wait()
|
||||
if err != nil {
|
||||
// If the connection drops unexpectedly, we get an ExitMissingError but no other
|
||||
@@ -259,16 +277,14 @@ func ssh() *cobra.Command {
|
||||
// getWorkspaceAgent returns the workspace and agent selected using either the
|
||||
// `<workspace>[.<agent>]` syntax via `in` or picks a random workspace and agent
|
||||
// if `shuffle` is true.
|
||||
func getWorkspaceAndAgent(cmd *cobra.Command, client *codersdk.Client, userID string, in string, shuffle bool) (codersdk.Workspace, codersdk.WorkspaceAgent, error) { //nolint:revive
|
||||
ctx := cmd.Context()
|
||||
|
||||
func getWorkspaceAndAgent(ctx context.Context, cmd *cobra.Command, client *codersdk.Client, userID string, in string, shuffle bool) (codersdk.Workspace, codersdk.WorkspaceAgent, error) { //nolint:revive
|
||||
var (
|
||||
workspace codersdk.Workspace
|
||||
workspaceParts = strings.Split(in, ".")
|
||||
err error
|
||||
)
|
||||
if shuffle {
|
||||
workspaces, err := client.Workspaces(cmd.Context(), codersdk.WorkspaceFilter{
|
||||
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Owner: codersdk.Me,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -371,7 +387,7 @@ func notifyCondition(ctx context.Context, client *codersdk.Client, workspaceID u
|
||||
return time.Time{}, nil
|
||||
}
|
||||
|
||||
deadline = ws.LatestBuild.Deadline
|
||||
deadline = ws.LatestBuild.Deadline.Time
|
||||
callback = func() {
|
||||
ttl := deadline.Sub(now)
|
||||
var title, body string
|
||||
|
||||
+13
-21
@@ -86,6 +86,7 @@ func TestSSH(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
pty.ExpectMatch("Waiting")
|
||||
|
||||
agentClient := codersdk.New(client.URL)
|
||||
agentClient.SessionToken = agentToken
|
||||
agentCloser := agent.New(agentClient.ListenWorkspaceAgent, &agent.Options{
|
||||
@@ -97,9 +98,6 @@ func TestSSH(t *testing.T) {
|
||||
|
||||
// Shells on Mac, Windows, and Linux all exit shells with the "exit" command.
|
||||
pty.WriteLine("exit")
|
||||
// Read output to prevent hang on macOS, see:
|
||||
// https://github.com/coder/coder/issues/2122
|
||||
pty.ExpectMatch("exit")
|
||||
<-cmdDone
|
||||
})
|
||||
t.Run("Stdio", func(t *testing.T) {
|
||||
@@ -173,17 +171,13 @@ func TestSSH(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForSSH(t)
|
||||
_, _ = tGoContext(t, func(ctx context.Context) {
|
||||
// Run this async so the SSH command has to wait for
|
||||
// the build and agent to connect!
|
||||
agentClient := codersdk.New(client.URL)
|
||||
agentClient.SessionToken = agentToken
|
||||
agentCloser := agent.New(agentClient.ListenWorkspaceAgent, &agent.Options{
|
||||
Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug),
|
||||
})
|
||||
<-ctx.Done()
|
||||
_ = agentCloser.Close()
|
||||
|
||||
agentClient := codersdk.New(client.URL)
|
||||
agentClient.SessionToken = agentToken
|
||||
agentCloser := agent.New(agentClient.ListenWorkspaceAgent, &agent.Options{
|
||||
Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug),
|
||||
})
|
||||
defer agentCloser.Close()
|
||||
|
||||
// Generate private key.
|
||||
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
@@ -204,15 +198,16 @@ func TestSSH(t *testing.T) {
|
||||
fd, err := l.Accept()
|
||||
if err != nil {
|
||||
if !errors.Is(err, net.ErrClosed) {
|
||||
t.Logf("accept error: %v", err)
|
||||
assert.NoError(t, err, "listener accept failed")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = gosshagent.ServeAgent(kr, fd)
|
||||
if !errors.Is(err, io.EOF) {
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err, "serve agent failed")
|
||||
}
|
||||
_ = fd.Close()
|
||||
}
|
||||
})
|
||||
|
||||
@@ -229,10 +224,10 @@ func TestSSH(t *testing.T) {
|
||||
pty := ptytest.New(t)
|
||||
cmd.SetIn(pty.Input())
|
||||
cmd.SetOut(pty.Output())
|
||||
cmd.SetErr(io.Discard)
|
||||
cmd.SetErr(pty.Output())
|
||||
cmdDone := tGo(t, func() {
|
||||
err := cmd.ExecuteContext(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err, "ssh command failed")
|
||||
})
|
||||
|
||||
// Ensure that SSH_AUTH_SOCK is set.
|
||||
@@ -243,14 +238,11 @@ func TestSSH(t *testing.T) {
|
||||
// Ensure that ssh-add lists our key.
|
||||
pty.WriteLine("ssh-add -L")
|
||||
keys, err := kr.List()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "list keys failed")
|
||||
pty.ExpectMatch(keys[0].String())
|
||||
|
||||
// And we're done.
|
||||
pty.WriteLine("exit")
|
||||
// Read output to prevent hang on macOS, see:
|
||||
// https://github.com/coder/coder/issues/2122
|
||||
pty.ExpectMatch("exit")
|
||||
<-cmdDone
|
||||
})
|
||||
}
|
||||
|
||||
+1
-9
@@ -17,15 +17,7 @@ func start() *cobra.Command {
|
||||
Short: "Build a workspace with the start state",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
_, err := cliui.Prompt(cmd, cliui.PromptOptions{
|
||||
Text: "Confirm start workspace?",
|
||||
IsConfirm: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+4
-3
@@ -1,6 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
@@ -26,7 +27,7 @@ func statePull() *cobra.Command {
|
||||
Use: "pull <workspace> [file]",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -50,7 +51,7 @@ func statePull() *cobra.Command {
|
||||
}
|
||||
|
||||
if len(args) < 2 {
|
||||
cmd.Println(string(state))
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), string(state))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -67,7 +68,7 @@ func statePush() *cobra.Command {
|
||||
Use: "push <workspace> <file>",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+1
-1
@@ -25,7 +25,7 @@ func stop() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func templateCreate() *cobra.Command {
|
||||
Short: "Create a template from the current directory or as specified by flag",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ func templateDelete() *cobra.Command {
|
||||
templates = []codersdk.Template{}
|
||||
)
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+9
-3
@@ -13,7 +13,9 @@ import (
|
||||
|
||||
func templateEdit() *cobra.Command {
|
||||
var (
|
||||
name string
|
||||
description string
|
||||
icon string
|
||||
maxTTL time.Duration
|
||||
minAutostartInterval time.Duration
|
||||
)
|
||||
@@ -23,7 +25,7 @@ func templateEdit() *cobra.Command {
|
||||
Args: cobra.ExactArgs(1),
|
||||
Short: "Edit the metadata of a template by name.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create client: %w", err)
|
||||
}
|
||||
@@ -38,7 +40,9 @@ func templateEdit() *cobra.Command {
|
||||
|
||||
// NOTE: coderd will ignore empty fields.
|
||||
req := codersdk.UpdateTemplateMeta{
|
||||
Name: name,
|
||||
Description: description,
|
||||
Icon: icon,
|
||||
MaxTTLMillis: maxTTL.Milliseconds(),
|
||||
MinAutostartIntervalMillis: minAutostartInterval.Milliseconds(),
|
||||
}
|
||||
@@ -52,9 +56,11 @@ func templateEdit() *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVarP(&name, "name", "", "", "Edit the template name")
|
||||
cmd.Flags().StringVarP(&description, "description", "", "", "Edit the template description")
|
||||
cmd.Flags().DurationVarP(&maxTTL, "max_ttl", "", 0, "Edit the template maximum time before shutdown")
|
||||
cmd.Flags().DurationVarP(&minAutostartInterval, "min_autostart_interval", "", 0, "Edit the template minimum autostart interval")
|
||||
cmd.Flags().StringVarP(&icon, "icon", "", "", "Edit the template icon path")
|
||||
cmd.Flags().DurationVarP(&maxTTL, "max-ttl", "", 0, "Edit the template maximum time before shutdown - workspaces created from this template cannot stay running longer than this.")
|
||||
cmd.Flags().DurationVarP(&minAutostartInterval, "min-autostart-interval", "", 0, "Edit the template minimum autostart interval - workspaces created from this template must wait at least this long between autostarts.")
|
||||
cliui.AllowSkipPrompt(cmd)
|
||||
|
||||
return cmd
|
||||
|
||||
@@ -25,21 +25,26 @@ func TestTemplateEdit(t *testing.T) {
|
||||
_ = coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) {
|
||||
ctr.Description = "original description"
|
||||
ctr.Icon = "/icons/default-icon.png"
|
||||
ctr.MaxTTLMillis = ptr.Ref(24 * time.Hour.Milliseconds())
|
||||
ctr.MinAutostartIntervalMillis = ptr.Ref(time.Hour.Milliseconds())
|
||||
})
|
||||
|
||||
// Test the cli command.
|
||||
name := "new-template-name"
|
||||
desc := "lorem ipsum dolor sit amet et cetera"
|
||||
icon := "/icons/new-icon.png"
|
||||
maxTTL := 12 * time.Hour
|
||||
minAutostartInterval := time.Minute
|
||||
cmdArgs := []string{
|
||||
"templates",
|
||||
"edit",
|
||||
template.Name,
|
||||
"--name", name,
|
||||
"--description", desc,
|
||||
"--max_ttl", maxTTL.String(),
|
||||
"--min_autostart_interval", minAutostartInterval.String(),
|
||||
"--icon", icon,
|
||||
"--max-ttl", maxTTL.String(),
|
||||
"--min-autostart-interval", minAutostartInterval.String(),
|
||||
}
|
||||
cmd, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -51,7 +56,9 @@ func TestTemplateEdit(t *testing.T) {
|
||||
// Assert that the template metadata changed.
|
||||
updated, err := client.Template(context.Background(), template.ID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, name, updated.Name)
|
||||
assert.Equal(t, desc, updated.Description)
|
||||
assert.Equal(t, icon, updated.Icon)
|
||||
assert.Equal(t, maxTTL.Milliseconds(), updated.MaxTTLMillis)
|
||||
assert.Equal(t, minAutostartInterval.Milliseconds(), updated.MinAutostartIntervalMillis)
|
||||
})
|
||||
@@ -64,6 +71,7 @@ func TestTemplateEdit(t *testing.T) {
|
||||
_ = coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) {
|
||||
ctr.Description = "original description"
|
||||
ctr.Icon = "/icons/default-icon.png"
|
||||
ctr.MaxTTLMillis = ptr.Ref(24 * time.Hour.Milliseconds())
|
||||
ctr.MinAutostartIntervalMillis = ptr.Ref(time.Hour.Milliseconds())
|
||||
})
|
||||
@@ -73,9 +81,11 @@ func TestTemplateEdit(t *testing.T) {
|
||||
"templates",
|
||||
"edit",
|
||||
template.Name,
|
||||
"--name", template.Name,
|
||||
"--description", template.Description,
|
||||
"--max_ttl", (time.Duration(template.MaxTTLMillis) * time.Millisecond).String(),
|
||||
"--min_autostart_interval", (time.Duration(template.MinAutostartIntervalMillis) * time.Millisecond).String(),
|
||||
"--icon", template.Icon,
|
||||
"--max-ttl", (time.Duration(template.MaxTTLMillis) * time.Millisecond).String(),
|
||||
"--min-autostart-interval", (time.Duration(template.MinAutostartIntervalMillis) * time.Millisecond).String(),
|
||||
}
|
||||
cmd, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -87,7 +97,9 @@ func TestTemplateEdit(t *testing.T) {
|
||||
// Assert that the template metadata did not change.
|
||||
updated, err := client.Template(context.Background(), template.ID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, template.Name, updated.Name)
|
||||
assert.Equal(t, template.Description, updated.Description)
|
||||
assert.Equal(t, template.Icon, updated.Icon)
|
||||
assert.Equal(t, template.MaxTTLMillis, updated.MaxTTLMillis)
|
||||
assert.Equal(t, template.MinAutostartIntervalMillis, updated.MinAutostartIntervalMillis)
|
||||
})
|
||||
|
||||
+1
-1
@@ -36,7 +36,7 @@ func templateInit() *cobra.Command {
|
||||
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), cliui.Styles.Wrap.Render(
|
||||
"A template defines infrastructure as code to be provisioned "+
|
||||
"for individual developer workspaces. Select an example to get started:\n"))
|
||||
"for individual developer workspaces. Select an example to be copied to the active directory:\n"))
|
||||
option, err := cliui.Select(cmd, cliui.SelectOptions{
|
||||
Options: exampleNames,
|
||||
})
|
||||
|
||||
+9
-4
@@ -16,7 +16,7 @@ func templateList() *cobra.Command {
|
||||
Short: "List all the templates available for the organization",
|
||||
Aliases: []string{"ls"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -30,12 +30,17 @@ func templateList() *cobra.Command {
|
||||
}
|
||||
|
||||
if len(templates) == 0 {
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s No templates found in %s! Create one:\n\n", caret, color.HiWhiteString(organization.Name))
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), color.HiMagentaString(" $ coder templates create <directory>\n"))
|
||||
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "%s No templates found in %s! Create one:\n\n", caret, color.HiWhiteString(organization.Name))
|
||||
_, _ = fmt.Fprintln(cmd.ErrOrStderr(), color.HiMagentaString(" $ coder templates create <directory>\n"))
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), displayTemplates(columns, templates...))
|
||||
out, err := displayTemplates(columns, templates...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -36,10 +37,15 @@ func TestTemplateList(t *testing.T) {
|
||||
errC <- cmd.Execute()
|
||||
}()
|
||||
|
||||
// expect that templates are listed alphebetically
|
||||
var templatesList = []string{firstTemplate.Name, secondTemplate.Name}
|
||||
sort.Strings(templatesList)
|
||||
|
||||
require.NoError(t, <-errC)
|
||||
|
||||
pty.ExpectMatch(firstTemplate.Name)
|
||||
pty.ExpectMatch(secondTemplate.Name)
|
||||
for _, name := range templatesList {
|
||||
pty.ExpectMatch(name)
|
||||
}
|
||||
})
|
||||
t.Run("NoTemplates", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -51,7 +57,7 @@ func TestTemplateList(t *testing.T) {
|
||||
|
||||
pty := ptytest.New(t)
|
||||
cmd.SetIn(pty.Input())
|
||||
cmd.SetOut(pty.Output())
|
||||
cmd.SetErr(pty.Output())
|
||||
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
|
||||
+1
-1
@@ -8,7 +8,7 @@ func templatePlan() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "plan <directory>",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Short: "Plan a template update from the current directory",
|
||||
Short: "Plan a template push from the current directory",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
|
||||
+1
-1
@@ -29,7 +29,7 @@ func templatePull() *cobra.Command {
|
||||
dest = args[1]
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create client: %w", err)
|
||||
}
|
||||
|
||||
+1
-1
@@ -29,7 +29,7 @@ func templatePush() *cobra.Command {
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
Short: "Push a new template version from the current directory or as specified by flag",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+30
-24
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/google/uuid"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
@@ -46,35 +46,41 @@ func templates() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
type templateTableRow struct {
|
||||
Name string `table:"name"`
|
||||
CreatedAt string `table:"created at"`
|
||||
LastUpdated string `table:"last updated"`
|
||||
OrganizationID uuid.UUID `table:"organization id"`
|
||||
Provisioner codersdk.ProvisionerType `table:"provisioner"`
|
||||
ActiveVersionID uuid.UUID `table:"active version id"`
|
||||
UsedBy string `table:"used by"`
|
||||
MaxTTL time.Duration `table:"max ttl"`
|
||||
MinAutostartInterval time.Duration `table:"min autostart"`
|
||||
}
|
||||
|
||||
// displayTemplates will return a table displaying all templates passed in.
|
||||
// filterColumns must be a subset of the template fields and will determine which
|
||||
// columns to display
|
||||
func displayTemplates(filterColumns []string, templates ...codersdk.Template) string {
|
||||
tableWriter := cliui.Table()
|
||||
header := table.Row{
|
||||
"Name", "Created At", "Last Updated", "Organization ID", "Provisioner",
|
||||
"Active Version ID", "Used By", "Max TTL", "Min Autostart"}
|
||||
tableWriter.AppendHeader(header)
|
||||
tableWriter.SetColumnConfigs(cliui.FilterTableColumns(header, filterColumns))
|
||||
tableWriter.SortBy([]table.SortBy{{
|
||||
Name: "name",
|
||||
}})
|
||||
for _, template := range templates {
|
||||
func displayTemplates(filterColumns []string, templates ...codersdk.Template) (string, error) {
|
||||
rows := make([]templateTableRow, len(templates))
|
||||
for i, template := range templates {
|
||||
suffix := ""
|
||||
if template.WorkspaceOwnerCount != 1 {
|
||||
suffix = "s"
|
||||
}
|
||||
tableWriter.AppendRow(table.Row{
|
||||
template.Name,
|
||||
template.CreatedAt.Format("January 2, 2006"),
|
||||
template.UpdatedAt.Format("January 2, 2006"),
|
||||
template.OrganizationID.String(),
|
||||
template.Provisioner,
|
||||
template.ActiveVersionID.String(),
|
||||
cliui.Styles.Fuchsia.Render(fmt.Sprintf("%d developer%s", template.WorkspaceOwnerCount, suffix)),
|
||||
(time.Duration(template.MaxTTLMillis) * time.Millisecond).String(),
|
||||
(time.Duration(template.MinAutostartIntervalMillis) * time.Millisecond).String(),
|
||||
})
|
||||
|
||||
rows[i] = templateTableRow{
|
||||
Name: template.Name,
|
||||
CreatedAt: template.CreatedAt.Format("January 2, 2006"),
|
||||
LastUpdated: template.UpdatedAt.Format("January 2, 2006"),
|
||||
OrganizationID: template.OrganizationID,
|
||||
Provisioner: template.Provisioner,
|
||||
ActiveVersionID: template.ActiveVersionID,
|
||||
UsedBy: cliui.Styles.Fuchsia.Render(fmt.Sprintf("%d developer%s", template.WorkspaceOwnerCount, suffix)),
|
||||
MaxTTL: (time.Duration(template.MaxTTLMillis) * time.Millisecond),
|
||||
MinAutostartInterval: (time.Duration(template.MinAutostartIntervalMillis) * time.Millisecond),
|
||||
}
|
||||
}
|
||||
return tableWriter.Render()
|
||||
|
||||
return cliui.DisplayTable(rows, "name", filterColumns)
|
||||
}
|
||||
|
||||
+30
-17
@@ -3,9 +3,9 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -38,7 +38,7 @@ func templateVersionsList() *cobra.Command {
|
||||
Args: cobra.ExactArgs(1),
|
||||
Short: "List all the versions of the specified template",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create client: %w", err)
|
||||
}
|
||||
@@ -58,31 +58,44 @@ func templateVersionsList() *cobra.Command {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template versions by template: %w", err)
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), displayTemplateVersions(template.ActiveVersionID, versions...))
|
||||
|
||||
out, err := displayTemplateVersions(template.ActiveVersionID, versions...)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("render table: %w", err)
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type templateVersionRow struct {
|
||||
Name string `table:"name"`
|
||||
CreatedAt time.Time `table:"created at"`
|
||||
CreatedBy string `table:"created by"`
|
||||
Status string `table:"status"`
|
||||
Active string `table:"active"`
|
||||
}
|
||||
|
||||
// displayTemplateVersions will return a table displaying existing
|
||||
// template versions for the specified template.
|
||||
func displayTemplateVersions(activeVersionID uuid.UUID, templateVersions ...codersdk.TemplateVersion) string {
|
||||
tableWriter := cliui.Table()
|
||||
header := table.Row{
|
||||
"Name", "Created At", "Created By", "Status", ""}
|
||||
tableWriter.AppendHeader(header)
|
||||
for _, templateVersion := range templateVersions {
|
||||
func displayTemplateVersions(activeVersionID uuid.UUID, templateVersions ...codersdk.TemplateVersion) (string, error) {
|
||||
rows := make([]templateVersionRow, len(templateVersions))
|
||||
for i, templateVersion := range templateVersions {
|
||||
var activeStatus = ""
|
||||
if templateVersion.ID == activeVersionID {
|
||||
activeStatus = cliui.Styles.Code.Render(cliui.Styles.Keyword.Render("Active"))
|
||||
}
|
||||
tableWriter.AppendRow(table.Row{
|
||||
templateVersion.Name,
|
||||
templateVersion.CreatedAt.Format("03:04:05 PM MST on Jan 2, 2006"),
|
||||
templateVersion.CreatedByName,
|
||||
strings.Title(string(templateVersion.Job.Status)),
|
||||
activeStatus,
|
||||
})
|
||||
|
||||
rows[i] = templateVersionRow{
|
||||
Name: templateVersion.Name,
|
||||
CreatedAt: templateVersion.CreatedAt,
|
||||
CreatedBy: templateVersion.CreatedByName,
|
||||
Status: strings.Title(string(templateVersion.Job.Status)),
|
||||
Active: activeStatus,
|
||||
}
|
||||
}
|
||||
return tableWriter.Render()
|
||||
|
||||
return cliui.DisplayTable(rows, "name", nil)
|
||||
}
|
||||
|
||||
+3
-2
@@ -18,10 +18,11 @@ func update() *cobra.Command {
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Annotations: workspaceCommand,
|
||||
Use: "update",
|
||||
Use: "update <workspace>",
|
||||
Args: cobra.ExactArgs(1),
|
||||
Short: "Update a workspace to the latest template version",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -0,0 +1,141 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Test that the function does not panic on missing arg.
|
||||
t.Run("NoArgs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cmd, _ := clitest.New(t, "update")
|
||||
err := cmd.Execute()
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerD: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version1 := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version1.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version1.ID)
|
||||
|
||||
cmd, root := clitest.New(t, "create",
|
||||
"my-workspace",
|
||||
"--template", template.Name,
|
||||
"-y",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
err := cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
ws, err := client.WorkspaceByOwnerAndName(context.Background(), "testuser", "my-workspace", codersdk.WorkspaceOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version1.ID.String(), ws.LatestBuild.TemplateVersionID.String())
|
||||
|
||||
version2 := coderdtest.UpdateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
Provision: echo.ProvisionComplete,
|
||||
ProvisionDryRun: echo.ProvisionComplete,
|
||||
}, template.ID)
|
||||
_ = coderdtest.AwaitTemplateVersionJob(t, client, version2.ID)
|
||||
|
||||
err = client.UpdateActiveTemplateVersion(context.Background(), template.ID, codersdk.UpdateActiveTemplateVersion{
|
||||
ID: version2.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd, root = clitest.New(t, "update", ws.Name)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
err = cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
ws, err = client.WorkspaceByOwnerAndName(context.Background(), "testuser", "my-workspace", codersdk.WorkspaceOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version2.ID.String(), ws.LatestBuild.TemplateVersionID.String())
|
||||
})
|
||||
|
||||
t.Run("WithParameter", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerD: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version1 := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version1.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version1.ID)
|
||||
|
||||
cmd, root := clitest.New(t, "create",
|
||||
"my-workspace",
|
||||
"--template", template.Name,
|
||||
"-y",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
err := cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
ws, err := client.WorkspaceByOwnerAndName(context.Background(), "testuser", "my-workspace", codersdk.WorkspaceOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version1.ID.String(), ws.LatestBuild.TemplateVersionID.String())
|
||||
|
||||
defaultValue := "something"
|
||||
version2 := coderdtest.UpdateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: createTestParseResponseWithDefault(defaultValue),
|
||||
Provision: echo.ProvisionComplete,
|
||||
ProvisionDryRun: echo.ProvisionComplete,
|
||||
}, template.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version2.ID)
|
||||
|
||||
err = client.UpdateActiveTemplateVersion(context.Background(), template.ID, codersdk.UpdateActiveTemplateVersion{
|
||||
ID: version2.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd, root = clitest.New(t, "update", ws.Name)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
cmd.SetIn(pty.Input())
|
||||
cmd.SetOut(pty.Output())
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
matches := []string{
|
||||
fmt.Sprintf("Enter a value (default: %q):", defaultValue), "bingo",
|
||||
"Enter a value:", "boingo",
|
||||
}
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
match := matches[i]
|
||||
value := matches[i+1]
|
||||
pty.ExpectMatch(match)
|
||||
pty.WriteLine(value)
|
||||
}
|
||||
|
||||
<-doneChan
|
||||
})
|
||||
}
|
||||
+1
-1
@@ -21,7 +21,7 @@ func userCreate() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "create",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+9
-6
@@ -26,7 +26,7 @@ func userList() *cobra.Command {
|
||||
Use: "list",
|
||||
Aliases: []string{"ls"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -38,7 +38,10 @@ func userList() *cobra.Command {
|
||||
out := ""
|
||||
switch outputFormat {
|
||||
case "table", "":
|
||||
out = displayUsers(columns, users...)
|
||||
out, err = cliui.DisplayTable(users, "Username", columns)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("render table: %w", err)
|
||||
}
|
||||
case "json":
|
||||
outBytes, err := json.Marshal(users)
|
||||
if err != nil {
|
||||
@@ -73,7 +76,7 @@ func userSingle() *cobra.Command {
|
||||
),
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -108,13 +111,13 @@ func userSingle() *cobra.Command {
|
||||
}
|
||||
|
||||
func displayUser(ctx context.Context, stderr io.Writer, client *codersdk.Client, user codersdk.User) string {
|
||||
tableWriter := cliui.Table()
|
||||
tw := cliui.Table()
|
||||
addRow := func(name string, value interface{}) {
|
||||
key := ""
|
||||
if name != "" {
|
||||
key = name + ":"
|
||||
}
|
||||
tableWriter.AppendRow(table.Row{
|
||||
tw.AppendRow(table.Row{
|
||||
key, value,
|
||||
})
|
||||
}
|
||||
@@ -167,5 +170,5 @@ func displayUser(ctx context.Context, stderr io.Writer, client *codersdk.Client,
|
||||
addRow("Organizations", "(none)")
|
||||
}
|
||||
|
||||
return tableWriter.Render()
|
||||
return tw.Render()
|
||||
}
|
||||
|
||||
@@ -1,12 +1,8 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
@@ -25,26 +21,3 @@ func users() *cobra.Command {
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// displayUsers will return a table displaying all users passed in.
|
||||
// filterColumns must be a subset of the user fields and will determine which
|
||||
// columns to display
|
||||
func displayUsers(filterColumns []string, users ...codersdk.User) string {
|
||||
tableWriter := cliui.Table()
|
||||
header := table.Row{"id", "username", "email", "created at", "status"}
|
||||
tableWriter.AppendHeader(header)
|
||||
tableWriter.SetColumnConfigs(cliui.FilterTableColumns(header, filterColumns))
|
||||
tableWriter.SortBy([]table.SortBy{{
|
||||
Name: "username",
|
||||
}})
|
||||
for _, user := range users {
|
||||
tableWriter.AppendRow(table.Row{
|
||||
user.ID.String(),
|
||||
user.Username,
|
||||
user.Email,
|
||||
user.CreatedAt.Format(time.Stamp),
|
||||
user.Status,
|
||||
})
|
||||
}
|
||||
return tableWriter.Render()
|
||||
}
|
||||
|
||||
+6
-2
@@ -43,7 +43,7 @@ func createUserStatusCommand(sdkStatus codersdk.UserStatus) *cobra.Command {
|
||||
},
|
||||
),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -59,7 +59,11 @@ func createUserStatusCommand(sdkStatus codersdk.UserStatus) *cobra.Command {
|
||||
}
|
||||
|
||||
// Display the user
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), displayUsers(columns, user))
|
||||
table, err := cliui.DisplayTable([]codersdk.User{user}, "", columns)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("render user table: %w", err)
|
||||
}
|
||||
_, _ = fmt.Fprintln(cmd.OutOrStdout(), table)
|
||||
|
||||
// User status is already set to this
|
||||
if user.Status == sdkStatus {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
// nolint:tparallel,paralleltest
|
||||
func TestUserStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
@@ -20,7 +21,6 @@ func TestUserStatus(t *testing.T) {
|
||||
otherUser, err := other.User(context.Background(), codersdk.Me)
|
||||
require.NoError(t, err, "fetch user")
|
||||
|
||||
//nolint:paralleltest
|
||||
t.Run("StatusSelf", func(t *testing.T) {
|
||||
cmd, root := clitest.New(t, "users", "suspend", "me")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
@@ -32,7 +32,6 @@ func TestUserStatus(t *testing.T) {
|
||||
require.ErrorContains(t, err, "cannot suspend yourself")
|
||||
})
|
||||
|
||||
//nolint:paralleltest
|
||||
t.Run("StatusOther", func(t *testing.T) {
|
||||
require.Equal(t, otherUser.Status, codersdk.UserStatusActive, "start as active")
|
||||
|
||||
|
||||
+6
-6
@@ -16,11 +16,11 @@ var errInvalidTimeFormat = xerrors.New("Start time must be in the format hh:mm[a
|
||||
var errUnsupportedTimezone = xerrors.New("The location you provided looks like a timezone. Check https://ipinfo.io for your location.")
|
||||
|
||||
// durationDisplay formats a duration for easier display:
|
||||
// * Durations of 24 hours or greater are displays as Xd
|
||||
// * Durations less than 1 minute are displayed as <1m
|
||||
// * Duration is truncated to the nearest minute
|
||||
// * Empty minutes and seconds are truncated
|
||||
// * The returned string is the absolute value. Use sign()
|
||||
// - Durations of 24 hours or greater are displays as Xd
|
||||
// - Durations less than 1 minute are displayed as <1m
|
||||
// - Duration is truncated to the nearest minute
|
||||
// - Empty minutes and seconds are truncated
|
||||
// - The returned string is the absolute value. Use sign()
|
||||
// if you need to indicate if the duration is positive or
|
||||
// negative.
|
||||
func durationDisplay(d time.Duration) string {
|
||||
@@ -114,7 +114,7 @@ func parseCLISchedule(parts ...string) (*schedule.Schedule, error) {
|
||||
if loc == nil {
|
||||
loc, err = tz.TimezoneIANA()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Could not automatically determine your timezone")
|
||||
loc = time.UTC
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
+14
-8
@@ -52,6 +52,9 @@ func wireguardPortForward() *cobra.Command {
|
||||
},
|
||||
),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
specs, err := parsePortForwards(tcpForwards, nil, nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse port-forward specs: %w", err)
|
||||
@@ -64,12 +67,12 @@ func wireguardPortForward() *cobra.Command {
|
||||
return xerrors.New("no port-forwards requested")
|
||||
}
|
||||
|
||||
client, err := createClient(cmd)
|
||||
client, err := CreateClient(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(cmd, client, codersdk.Me, args[0], false)
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, cmd, client, codersdk.Me, args[0], false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -77,13 +80,13 @@ func wireguardPortForward() *cobra.Command {
|
||||
return xerrors.New("workspace must be in start transition to port-forward")
|
||||
}
|
||||
if workspace.LatestBuild.Job.CompletedAt == nil {
|
||||
err = cliui.WorkspaceBuild(cmd.Context(), cmd.ErrOrStderr(), client, workspace.LatestBuild.ID, workspace.CreatedAt)
|
||||
err = cliui.WorkspaceBuild(ctx, cmd.ErrOrStderr(), client, workspace.LatestBuild.ID, workspace.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = cliui.Agent(cmd.Context(), cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
err = cliui.Agent(ctx, cmd.ErrOrStderr(), cliui.AgentOptions{
|
||||
WorkspaceName: workspace.Name,
|
||||
Fetch: func(ctx context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
return client.WorkspaceAgent(ctx, workspaceAgent.ID)
|
||||
@@ -101,8 +104,9 @@ func wireguardPortForward() *cobra.Command {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create wireguard network: %w", err)
|
||||
}
|
||||
defer wgn.Close()
|
||||
|
||||
err = client.PostWireguardPeer(cmd.Context(), workspace.ID, peerwg.Handshake{
|
||||
err = client.PostWireguardPeer(ctx, workspace.ID, peerwg.Handshake{
|
||||
Recipient: workspaceAgent.ID,
|
||||
NodePublicKey: wgn.NodePrivateKey.Public(),
|
||||
DiscoPublicKey: wgn.DiscoPublicKey,
|
||||
@@ -124,7 +128,6 @@ func wireguardPortForward() *cobra.Command {
|
||||
|
||||
// Start all listeners.
|
||||
var (
|
||||
ctx, cancel = context.WithCancel(cmd.Context())
|
||||
wg = new(sync.WaitGroup)
|
||||
listeners = make([]net.Listener, len(specs))
|
||||
closeAllListeners = func() {
|
||||
@@ -136,11 +139,11 @@ func wireguardPortForward() *cobra.Command {
|
||||
}
|
||||
}
|
||||
)
|
||||
defer cancel()
|
||||
defer closeAllListeners()
|
||||
|
||||
for i, spec := range specs {
|
||||
l, err := listenAndPortForwardWireguard(ctx, cmd, wgn, wg, spec, workspaceAgent.IPv6.IP())
|
||||
if err != nil {
|
||||
closeAllListeners()
|
||||
return err
|
||||
}
|
||||
listeners[i] = l
|
||||
@@ -149,7 +152,10 @@ func wireguardPortForward() *cobra.Command {
|
||||
// Wait for the context to be canceled or for a signal and close
|
||||
// all listeners.
|
||||
var closeErr error
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
|
||||
+1
-1
@@ -15,7 +15,7 @@ import (
|
||||
func main() {
|
||||
rand.Seed(time.Now().UnixMicro())
|
||||
|
||||
cmd, err := cli.Root().ExecuteC()
|
||||
cmd, err := cli.Root(cli.AGPL()).ExecuteC()
|
||||
if err != nil {
|
||||
if errors.Is(err, cliui.Canceled) {
|
||||
os.Exit(1)
|
||||
|
||||
+1
-1
@@ -1,6 +1,6 @@
|
||||
[Unit]
|
||||
Description="Coder - Self-hosted developer workspaces on your infra"
|
||||
Documentation=https://coder.com/docs/
|
||||
Documentation=https://coder.com/docs/coder-oss
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
ConditionFileNotEmpty=/etc/coder.d/coder.env
|
||||
|
||||
@@ -99,6 +99,7 @@ func diffValues[T any](left, right T, table Table) Map {
|
||||
}
|
||||
|
||||
// convertDiffType converts external struct types to primitive types.
|
||||
//
|
||||
//nolint:forcetypeassert
|
||||
func convertDiffType(left, right any) (newLeft, newRight any, changed bool) {
|
||||
switch typed := left.(type) {
|
||||
|
||||
@@ -230,6 +230,7 @@ func runDiffTests[T audit.Auditable](t *testing.T, tests []diffTest[T]) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(typName+"/"+test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
require.Equal(t,
|
||||
test.exp,
|
||||
audit.Diff(test.left, test.right),
|
||||
|
||||
@@ -70,6 +70,7 @@ var AuditableResources = auditMap(map[any]map[string]Action{
|
||||
"provisioner": ActionTrack,
|
||||
"active_version_id": ActionTrack,
|
||||
"description": ActionTrack,
|
||||
"icon": ActionTrack,
|
||||
"max_ttl": ActionTrack,
|
||||
"min_autostart_interval": ActionTrack,
|
||||
"created_by": ActionTrack,
|
||||
@@ -94,6 +95,7 @@ var AuditableResources = auditMap(map[any]map[string]Action{
|
||||
"updated_at": ActionIgnore, // Changes, but is implicit and not helpful in a diff.
|
||||
"status": ActionTrack,
|
||||
"rbac_roles": ActionTrack,
|
||||
"login_type": ActionIgnore,
|
||||
},
|
||||
&database.Workspace{}: {
|
||||
"id": ActionTrack,
|
||||
|
||||
+36
-5
@@ -10,28 +10,59 @@ import (
|
||||
"github.com/coder/coder/coderd/rbac"
|
||||
)
|
||||
|
||||
func AuthorizeFilter[O rbac.Objecter](api *API, r *http.Request, action rbac.Action, objects []O) []O {
|
||||
func AuthorizeFilter[O rbac.Objecter](h *HTTPAuthorizer, r *http.Request, action rbac.Action, objects []O) ([]O, error) {
|
||||
roles := httpmw.AuthorizationUserRoles(r)
|
||||
return rbac.Filter(r.Context(), api.Authorizer, roles.ID.String(), roles.Roles, action, objects)
|
||||
objects, err := rbac.Filter(r.Context(), h.Authorizer, roles.ID.String(), roles.Roles, action, objects)
|
||||
if err != nil {
|
||||
// Log the error as Filter should not be erroring.
|
||||
h.Logger.Error(r.Context(), "filter failed",
|
||||
slog.Error(err),
|
||||
slog.F("user_id", roles.ID),
|
||||
slog.F("username", roles.Username),
|
||||
slog.F("route", r.URL.Path),
|
||||
slog.F("action", action),
|
||||
)
|
||||
return nil, err
|
||||
}
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
type HTTPAuthorizer struct {
|
||||
Authorizer rbac.Authorizer
|
||||
Logger slog.Logger
|
||||
}
|
||||
|
||||
// Authorize will return false if the user is not authorized to do the action.
|
||||
// This function will log appropriately, but the caller must return an
|
||||
// error to the api client.
|
||||
// Eg:
|
||||
//
|
||||
// if !api.Authorize(...) {
|
||||
// httpapi.Forbidden(rw)
|
||||
// return
|
||||
// }
|
||||
func (api *API) Authorize(r *http.Request, action rbac.Action, object rbac.Objecter) bool {
|
||||
return api.httpAuth.Authorize(r, action, object)
|
||||
}
|
||||
|
||||
// Authorize will return false if the user is not authorized to do the action.
|
||||
// This function will log appropriately, but the caller must return an
|
||||
// error to the api client.
|
||||
// Eg:
|
||||
//
|
||||
// if !h.Authorize(...) {
|
||||
// httpapi.Forbidden(rw)
|
||||
// return
|
||||
// }
|
||||
func (h *HTTPAuthorizer) Authorize(r *http.Request, action rbac.Action, object rbac.Objecter) bool {
|
||||
roles := httpmw.AuthorizationUserRoles(r)
|
||||
err := api.Authorizer.ByRoleName(r.Context(), roles.ID.String(), roles.Roles, action, object.RBACObject())
|
||||
err := h.Authorizer.ByRoleName(r.Context(), roles.ID.String(), roles.Roles, action, object.RBACObject())
|
||||
if err != nil {
|
||||
// Log the errors for debugging
|
||||
internalError := new(rbac.UnauthorizedError)
|
||||
logger := api.Logger
|
||||
logger := h.Logger
|
||||
if xerrors.As(err, internalError) {
|
||||
logger = api.Logger.With(slog.F("internal", internalError.Internal()))
|
||||
logger = h.Logger.With(slog.F("internal", internalError.Internal()))
|
||||
}
|
||||
// Log information for debugging. This will be very helpful
|
||||
// in the early days
|
||||
|
||||
@@ -193,7 +193,7 @@ func TestExecutorAutostopOK(t *testing.T) {
|
||||
|
||||
// When: the autobuild executor ticks *after* the deadline:
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
@@ -229,7 +229,7 @@ func TestExecutorAutostopExtend(t *testing.T) {
|
||||
require.NotZero(t, originalDeadline)
|
||||
|
||||
// Given: we extend the workspace deadline
|
||||
newDeadline := originalDeadline.Add(30 * time.Minute)
|
||||
newDeadline := originalDeadline.Time.Add(30 * time.Minute)
|
||||
err := client.PutExtendWorkspace(ctx, workspace.ID, codersdk.PutExtendWorkspaceRequest{
|
||||
Deadline: newDeadline,
|
||||
})
|
||||
@@ -237,7 +237,7 @@ func TestExecutorAutostopExtend(t *testing.T) {
|
||||
|
||||
// When: the autobuild executor ticks *after* the original deadline:
|
||||
go func() {
|
||||
tickCh <- originalDeadline.Add(time.Minute)
|
||||
tickCh <- originalDeadline.Time.Add(time.Minute)
|
||||
}()
|
||||
|
||||
// Then: nothing should happen and the workspace should stay running
|
||||
@@ -281,7 +281,7 @@ func TestExecutorAutostopAlreadyStopped(t *testing.T) {
|
||||
|
||||
// When: the autobuild executor ticks past the TTL
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
@@ -323,7 +323,7 @@ func TestExecutorAutostopNotEnabled(t *testing.T) {
|
||||
|
||||
// When: the autobuild executor ticks past the TTL
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
@@ -415,7 +415,7 @@ func TestExecutorWorkspaceAutostopBeforeDeadline(t *testing.T) {
|
||||
|
||||
// When: the autobuild executor ticks before the TTL
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(-1 * time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(-1 * time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
@@ -447,11 +447,11 @@ func TestExecutorWorkspaceAutostopNoWaitChangedMyMind(t *testing.T) {
|
||||
|
||||
// Then: the deadline should still be the original value
|
||||
updated := coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
assert.WithinDuration(t, workspace.LatestBuild.Deadline, updated.LatestBuild.Deadline, time.Minute)
|
||||
assert.WithinDuration(t, workspace.LatestBuild.Deadline.Time, updated.LatestBuild.Deadline.Time, time.Minute)
|
||||
|
||||
// When: the autobuild executor ticks after the original deadline
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
}()
|
||||
|
||||
// Then: the workspace should stop
|
||||
@@ -478,7 +478,7 @@ func TestExecutorWorkspaceAutostopNoWaitChangedMyMind(t *testing.T) {
|
||||
|
||||
// When: the relentless onward march of time continues
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Add(newTTL + time.Minute)
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(newTTL + time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
|
||||
@@ -20,14 +20,14 @@ type Notifier struct {
|
||||
}
|
||||
|
||||
// Condition is a function that gets executed with a certain time.
|
||||
// - It should return the deadline for the notification, as well as a
|
||||
// callback function to execute once the time to the deadline is
|
||||
// less than one of the notify attempts. If deadline is the zero
|
||||
// time, callback will not be executed.
|
||||
// - Callback is executed once for every time the difference between deadline
|
||||
// and the current time is less than an element of countdown.
|
||||
// - To enforce a minimum interval between consecutive callbacks, truncate
|
||||
// the returned deadline to the minimum interval.
|
||||
// - It should return the deadline for the notification, as well as a
|
||||
// callback function to execute once the time to the deadline is
|
||||
// less than one of the notify attempts. If deadline is the zero
|
||||
// time, callback will not be executed.
|
||||
// - Callback is executed once for every time the difference between deadline
|
||||
// and the current time is less than an element of countdown.
|
||||
// - To enforce a minimum interval between consecutive callbacks, truncate
|
||||
// the returned deadline to the minimum interval.
|
||||
type Condition func(now time.Time) (deadline time.Time, callback func())
|
||||
|
||||
// Notify is a convenience function that initializes a new Notifier
|
||||
@@ -44,8 +44,8 @@ func Notify(cond Condition, interval time.Duration, countdown ...time.Duration)
|
||||
}
|
||||
|
||||
// New returns a Notifier that calls cond once every time it polls.
|
||||
// - Duplicate values are removed from countdown, and it is sorted in
|
||||
// descending order.
|
||||
// - Duplicate values are removed from countdown, and it is sorted in
|
||||
// descending order.
|
||||
func New(cond Condition, countdown ...time.Duration) *Notifier {
|
||||
// Ensure countdown is sorted in descending order and contains no duplicates.
|
||||
ct := unique(countdown)
|
||||
|
||||
@@ -28,13 +28,14 @@ var defaultParser = cron.NewParser(parserFormat)
|
||||
// - day of week e.g. 1 (required)
|
||||
//
|
||||
// Example Usage:
|
||||
// local_sched, _ := schedule.Weekly("59 23 *")
|
||||
// fmt.Println(sched.Next(time.Now().Format(time.RFC3339)))
|
||||
// // Output: 2022-04-04T23:59:00Z
|
||||
//
|
||||
// us_sched, _ := schedule.Weekly("CRON_TZ=US/Central 30 9 1-5")
|
||||
// fmt.Println(sched.Next(time.Now()).Format(time.RFC3339))
|
||||
// // Output: 2022-04-04T14:30:00Z
|
||||
// local_sched, _ := schedule.Weekly("59 23 *")
|
||||
// fmt.Println(sched.Next(time.Now().Format(time.RFC3339)))
|
||||
// // Output: 2022-04-04T23:59:00Z
|
||||
//
|
||||
// us_sched, _ := schedule.Weekly("CRON_TZ=US/Central 30 9 1-5")
|
||||
// fmt.Println(sched.Next(time.Now()).Format(time.RFC3339))
|
||||
// // Output: 2022-04-04T14:30:00Z
|
||||
func Weekly(raw string) (*Schedule, error) {
|
||||
if err := validateWeeklySpec(raw); err != nil {
|
||||
return nil, xerrors.Errorf("validate weekly schedule: %w", err)
|
||||
@@ -115,12 +116,12 @@ var tMax = t0.Add(168 * time.Hour)
|
||||
|
||||
// Min returns the minimum duration of the schedule.
|
||||
// This is calculated as follows:
|
||||
// - Let t(0) be a given point in time (1970-01-01T01:01:01Z00:00)
|
||||
// - Let t(max) be 168 hours after t(0).
|
||||
// - Let t(1) be the next scheduled time after t(0).
|
||||
// - Let t(n) be the next scheduled time after t(n-1).
|
||||
// - Then, the minimum duration of s d(min)
|
||||
// = min( t(n) - t(n-1) ∀ n ∈ N, t(n) < t(max) )
|
||||
// - Let t(0) be a given point in time (1970-01-01T01:01:01Z00:00)
|
||||
// - Let t(max) be 168 hours after t(0).
|
||||
// - Let t(1) be the next scheduled time after t(0).
|
||||
// - Let t(n) be the next scheduled time after t(n-1).
|
||||
// - Then, the minimum duration of s d(min)
|
||||
// = min( t(n) - t(n-1) ∀ n ∈ N, t(n) < t(max) )
|
||||
func (s Schedule) Min() time.Duration {
|
||||
durMin := tMax.Sub(t0)
|
||||
tPrev := s.Next(t0)
|
||||
|
||||
@@ -52,8 +52,10 @@ func Validate(ctx context.Context, signature string, options x509.VerifyOptions)
|
||||
}
|
||||
data, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
_ = res.Body.Close()
|
||||
return "", xerrors.Errorf("read body %q: %w", certURL, err)
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
cert, err := x509.ParseCertificate(data)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("parse certificate %q: %w", certURL, err)
|
||||
|
||||
+32
-5
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/pion/webrtc/v3"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
"golang.org/x/xerrors"
|
||||
"google.golang.org/api/idtoken"
|
||||
@@ -58,12 +59,15 @@ type Options struct {
|
||||
GoogleTokenValidator *idtoken.Validator
|
||||
GithubOAuth2Config *GithubOAuth2Config
|
||||
OIDCConfig *OIDCConfig
|
||||
PrometheusRegistry *prometheus.Registry
|
||||
ICEServers []webrtc.ICEServer
|
||||
SecureAuthCookie bool
|
||||
SSHKeygenAlgorithm gitsshkey.Algorithm
|
||||
Telemetry telemetry.Reporter
|
||||
TURNServer *turnconn.Server
|
||||
TracerProvider *sdktrace.TracerProvider
|
||||
AutoImportTemplates []AutoImportTemplate
|
||||
LicenseHandler http.Handler
|
||||
}
|
||||
|
||||
// New constructs a Coder API handler.
|
||||
@@ -87,6 +91,12 @@ func New(options *Options) *API {
|
||||
panic(xerrors.Errorf("rego authorize panic: %w", err))
|
||||
}
|
||||
}
|
||||
if options.PrometheusRegistry == nil {
|
||||
options.PrometheusRegistry = prometheus.NewRegistry()
|
||||
}
|
||||
if options.LicenseHandler == nil {
|
||||
options.LicenseHandler = licenses()
|
||||
}
|
||||
|
||||
siteCacheDir := options.CacheDir
|
||||
if siteCacheDir != "" {
|
||||
@@ -102,6 +112,10 @@ func New(options *Options) *API {
|
||||
Options: options,
|
||||
Handler: r,
|
||||
siteHandler: site.Handler(site.FS(), binFS),
|
||||
httpAuth: &HTTPAuthorizer{
|
||||
Authorizer: options.Authorizer,
|
||||
Logger: options.Logger,
|
||||
},
|
||||
}
|
||||
api.workspaceAgentCache = wsconncache.New(api.dialWorkspaceAgent, 0)
|
||||
oauthConfigs := &httpmw.OAuth2Configs{
|
||||
@@ -116,23 +130,25 @@ func New(options *Options) *API {
|
||||
next.ServeHTTP(middleware.NewWrapResponseWriter(w, r.ProtoMajor), r)
|
||||
})
|
||||
},
|
||||
httpmw.Prometheus,
|
||||
tracing.HTTPMW(api.TracerProvider, "coderd.http"),
|
||||
httpmw.Prometheus(options.PrometheusRegistry),
|
||||
)
|
||||
|
||||
apps := func(r chi.Router) {
|
||||
r.Use(
|
||||
httpmw.RateLimitPerMinute(options.APIRateLimit),
|
||||
tracing.HTTPMW(api.TracerProvider, "coderd.http"),
|
||||
httpmw.ExtractAPIKey(options.Database, oauthConfigs, true),
|
||||
httpmw.ExtractUserParam(api.Database),
|
||||
// Extracts the <workspace.agent> from the url
|
||||
httpmw.ExtractWorkspaceAndAgentParam(api.Database),
|
||||
)
|
||||
r.HandleFunc("/*", api.workspaceAppsProxyPath)
|
||||
}
|
||||
// %40 is the encoded character of the @ symbol. VS Code Web does
|
||||
// not handle character encoding properly, so it's safe to assume
|
||||
// other applications might not as well.
|
||||
r.Route("/%40{user}/{workspacename}/apps/{workspaceapp}", apps)
|
||||
r.Route("/@{user}/{workspacename}/apps/{workspaceapp}", apps)
|
||||
r.Route("/%40{user}/{workspace_and_agent}/apps/{workspaceapp}", apps)
|
||||
r.Route("/@{user}/{workspace_and_agent}/apps/{workspaceapp}", apps)
|
||||
|
||||
r.Route("/api/v2", func(r chi.Router) {
|
||||
r.NotFound(func(rw http.ResponseWriter, r *http.Request) {
|
||||
@@ -144,6 +160,7 @@ func New(options *Options) *API {
|
||||
// Specific routes can specify smaller limits.
|
||||
httpmw.RateLimitPerMinute(options.APIRateLimit),
|
||||
debugLogRequest(api.Logger),
|
||||
tracing.HTTPMW(api.TracerProvider, "coderd.http"),
|
||||
)
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(w, http.StatusOK, codersdk.Response{
|
||||
@@ -335,7 +352,7 @@ func New(options *Options) *API {
|
||||
r.Get("/", api.workspaceAgent)
|
||||
r.Post("/peer", api.postWorkspaceAgentWireguardPeer)
|
||||
r.Get("/dial", api.workspaceAgentDial)
|
||||
r.Get("/turn", api.workspaceAgentTurn)
|
||||
r.Get("/turn", api.userWorkspaceAgentTurn)
|
||||
r.Get("/pty", api.workspaceAgentPTY)
|
||||
r.Get("/iceservers", api.workspaceAgentICEServers)
|
||||
r.Get("/derp", api.derpMap)
|
||||
@@ -359,6 +376,7 @@ func New(options *Options) *API {
|
||||
httpmw.ExtractWorkspaceParam(options.Database),
|
||||
)
|
||||
r.Get("/", api.workspace)
|
||||
r.Patch("/", api.patchWorkspace)
|
||||
r.Route("/builds", func(r chi.Router) {
|
||||
r.Get("/", api.workspaceBuilds)
|
||||
r.Post("/", api.postWorkspaceBuilds)
|
||||
@@ -386,6 +404,14 @@ func New(options *Options) *API {
|
||||
r.Get("/resources", api.workspaceBuildResources)
|
||||
r.Get("/state", api.workspaceBuildState)
|
||||
})
|
||||
r.Route("/entitlements", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Get("/", entitlements)
|
||||
})
|
||||
r.Route("/licenses", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Mount("/", options.LicenseHandler)
|
||||
})
|
||||
})
|
||||
|
||||
r.NotFound(compressHandler(http.HandlerFunc(api.siteHandler.ServeHTTP)).ServeHTTP)
|
||||
@@ -400,6 +426,7 @@ type API struct {
|
||||
websocketWaitMutex sync.Mutex
|
||||
websocketWaitGroup sync.WaitGroup
|
||||
workspaceAgentCache *wsconncache.Cache
|
||||
httpAuth *HTTPAuthorizer
|
||||
}
|
||||
|
||||
// Close waits for all WebSocket connections to drain before returning.
|
||||
|
||||
+10
-552
@@ -2,44 +2,13 @@ package coderd_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"database/sql"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
"golang.org/x/xerrors"
|
||||
"google.golang.org/api/idtoken"
|
||||
"google.golang.org/api/option"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/buildinfo"
|
||||
"github.com/coder/coder/coderd"
|
||||
"github.com/coder/coder/coderd/autobuild/executor"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/coderd/database"
|
||||
"github.com/coder/coder/coderd/database/databasefake"
|
||||
"github.com/coder/coder/coderd/database/postgres"
|
||||
"github.com/coder/coder/coderd/gitsshkey"
|
||||
"github.com/coder/coder/coderd/rbac"
|
||||
"github.com/coder/coder/coderd/telemetry"
|
||||
"github.com/coder/coder/coderd/turnconn"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/provisionersdk/proto"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
@@ -50,7 +19,11 @@ func TestMain(m *testing.M) {
|
||||
func TestBuildInfo(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
buildInfo, err := client.BuildInfo(context.Background())
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
buildInfo, err := client.BuildInfo(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, buildinfo.ExternalURL(), buildInfo.ExternalURL, "external URL")
|
||||
require.Equal(t, buildinfo.Version(), buildInfo.Version, "version")
|
||||
@@ -59,524 +32,9 @@ func TestBuildInfo(t *testing.T) {
|
||||
// TestAuthorizeAllEndpoints will check `authorize` is called on every endpoint registered.
|
||||
func TestAuthorizeAllEndpoints(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
ctx = context.Background()
|
||||
authorizer = &fakeAuthorizer{}
|
||||
)
|
||||
|
||||
// This function was taken from coderdtest.newWithAPI. It is intentionally
|
||||
// copied to avoid exposing the API to other tests in coderd. Tests should
|
||||
// not need a reference to coderd.API...this test is an exception.
|
||||
newClient := func(authorizer rbac.Authorizer) (*codersdk.Client, *coderd.API) {
|
||||
// This can be hotswapped for a live database instance.
|
||||
db := databasefake.New()
|
||||
pubsub := database.NewPubsubInMemory()
|
||||
if os.Getenv("DB") != "" {
|
||||
connectionURL, closePg, err := postgres.Open()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(closePg)
|
||||
sqlDB, err := sql.Open("postgres", connectionURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = sqlDB.Close()
|
||||
})
|
||||
err = database.MigrateUp(sqlDB)
|
||||
require.NoError(t, err)
|
||||
db = database.New(sqlDB)
|
||||
|
||||
pubsub, err = database.NewPubsub(context.Background(), sqlDB, connectionURL)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = pubsub.Close()
|
||||
})
|
||||
}
|
||||
|
||||
tickerCh := make(chan time.Time)
|
||||
t.Cleanup(func() { close(tickerCh) })
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer t.Cleanup(cancelFunc) // Defer to ensure cancelFunc is executed first.
|
||||
|
||||
lifecycleExecutor := executor.New(
|
||||
ctx,
|
||||
db,
|
||||
slogtest.Make(t, nil).Named("autobuild.executor").Leveled(slog.LevelDebug),
|
||||
tickerCh,
|
||||
).WithStatsChannel(nil)
|
||||
lifecycleExecutor.Run()
|
||||
|
||||
srv := httptest.NewUnstartedServer(nil)
|
||||
srv.Config.BaseContext = func(_ net.Listener) context.Context {
|
||||
return ctx
|
||||
}
|
||||
srv.Start()
|
||||
t.Cleanup(srv.Close)
|
||||
serverURL, err := url.Parse(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
turnServer, err := turnconn.New(nil)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = turnServer.Close()
|
||||
})
|
||||
|
||||
validator, err := idtoken.NewValidator(ctx, option.WithoutAuthentication())
|
||||
require.NoError(t, err)
|
||||
|
||||
// We set the handler after server creation for the access URL.
|
||||
coderAPI := coderd.New(&coderd.Options{
|
||||
AgentConnectionUpdateFrequency: 150 * time.Millisecond,
|
||||
AccessURL: serverURL,
|
||||
Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug),
|
||||
Database: db,
|
||||
Pubsub: pubsub,
|
||||
|
||||
AWSCertificates: nil,
|
||||
AzureCertificates: x509.VerifyOptions{},
|
||||
GithubOAuth2Config: nil,
|
||||
GoogleTokenValidator: validator,
|
||||
SSHKeygenAlgorithm: gitsshkey.AlgorithmEd25519,
|
||||
TURNServer: turnServer,
|
||||
APIRateLimit: 0,
|
||||
Authorizer: authorizer,
|
||||
Telemetry: telemetry.NewNoop(),
|
||||
})
|
||||
srv.Config.Handler = coderAPI.Handler
|
||||
|
||||
_ = coderdtest.NewProvisionerDaemon(t, coderAPI)
|
||||
t.Cleanup(func() {
|
||||
_ = coderAPI.Close()
|
||||
})
|
||||
|
||||
return codersdk.New(serverURL), coderAPI
|
||||
}
|
||||
|
||||
client, api := newClient(authorizer)
|
||||
admin := coderdtest.CreateFirstUser(t, client)
|
||||
// The provisioner will call to coderd and register itself. This is async,
|
||||
// so we wait for it to occur.
|
||||
require.Eventually(t, func() bool {
|
||||
provisionerds, err := client.ProvisionerDaemons(ctx)
|
||||
return assert.NoError(t, err) && len(provisionerds) > 0
|
||||
}, testutil.WaitLong, testutil.IntervalSlow)
|
||||
|
||||
provisionerds, err := client.ProvisionerDaemons(ctx)
|
||||
require.NoError(t, err, "fetch provisioners")
|
||||
require.Len(t, provisionerds, 1)
|
||||
|
||||
organization, err := client.Organization(ctx, admin.OrganizationID)
|
||||
require.NoError(t, err, "fetch org")
|
||||
|
||||
// Setup some data in the database.
|
||||
version := coderdtest.CreateTemplateVersion(t, client, admin.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
Provision: []*proto.Provision_Response{{
|
||||
Type: &proto.Provision_Response_Complete{
|
||||
Complete: &proto.Provision_Complete{
|
||||
// Return a workspace resource
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "some",
|
||||
Type: "example",
|
||||
Agents: []*proto.Agent{{
|
||||
Id: "something",
|
||||
Auth: &proto.Agent_Token{},
|
||||
Apps: []*proto.App{{
|
||||
Name: "app",
|
||||
Url: "http://localhost:3000",
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, admin.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, admin.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
file, err := client.Upload(ctx, codersdk.ContentTypeTar, make([]byte, 1024))
|
||||
require.NoError(t, err, "upload file")
|
||||
workspaceResources, err := client.WorkspaceResourcesByBuild(ctx, workspace.LatestBuild.ID)
|
||||
require.NoError(t, err, "workspace resources")
|
||||
templateVersionDryRun, err := client.CreateTemplateVersionDryRun(ctx, version.ID, codersdk.CreateTemplateVersionDryRunRequest{
|
||||
ParameterValues: []codersdk.CreateParameterRequest{},
|
||||
})
|
||||
require.NoError(t, err, "template version dry-run")
|
||||
|
||||
templateParam, err := client.CreateParameter(ctx, codersdk.ParameterTemplate, template.ID, codersdk.CreateParameterRequest{
|
||||
Name: "test-param",
|
||||
SourceValue: "hello world",
|
||||
SourceScheme: codersdk.ParameterSourceSchemeData,
|
||||
DestinationScheme: codersdk.ParameterDestinationSchemeProvisionerVariable,
|
||||
})
|
||||
require.NoError(t, err, "create template param")
|
||||
|
||||
// Always fail auth from this point forward
|
||||
authorizer.AlwaysReturn = rbac.ForbiddenWithInternal(xerrors.New("fake implementation"), nil, nil)
|
||||
|
||||
// Some quick reused objects
|
||||
workspaceRBACObj := rbac.ResourceWorkspace.InOrg(organization.ID).WithID(workspace.ID.String()).WithOwner(workspace.OwnerID.String())
|
||||
|
||||
// skipRoutes allows skipping routes from being checked.
|
||||
skipRoutes := map[string]string{
|
||||
"POST:/api/v2/users/logout": "Logging out deletes the API Key for other routes",
|
||||
}
|
||||
|
||||
type routeCheck struct {
|
||||
NoAuthorize bool
|
||||
AssertAction rbac.Action
|
||||
AssertObject rbac.Object
|
||||
StatusCode int
|
||||
}
|
||||
assertRoute := map[string]routeCheck{
|
||||
// These endpoints do not require auth
|
||||
"GET:/api/v2": {NoAuthorize: true},
|
||||
"GET:/api/v2/buildinfo": {NoAuthorize: true},
|
||||
"GET:/api/v2/users/first": {NoAuthorize: true},
|
||||
"POST:/api/v2/users/first": {NoAuthorize: true},
|
||||
"POST:/api/v2/users/login": {NoAuthorize: true},
|
||||
"GET:/api/v2/users/authmethods": {NoAuthorize: true},
|
||||
"POST:/api/v2/csp/reports": {NoAuthorize: true},
|
||||
|
||||
"GET:/%40{user}/{workspacename}/apps/{application}/*": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/@{user}/{workspacename}/apps/{application}/*": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
|
||||
// Has it's own auth
|
||||
"GET:/api/v2/users/oauth2/github/callback": {NoAuthorize: true},
|
||||
"GET:/api/v2/users/oidc/callback": {NoAuthorize: true},
|
||||
|
||||
// All workspaceagents endpoints do not use rbac
|
||||
"POST:/api/v2/workspaceagents/aws-instance-identity": {NoAuthorize: true},
|
||||
"POST:/api/v2/workspaceagents/azure-instance-identity": {NoAuthorize: true},
|
||||
"POST:/api/v2/workspaceagents/google-instance-identity": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/gitsshkey": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/iceservers": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/listen": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/metadata": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/turn": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/derp": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/me/wireguardlisten": {NoAuthorize: true},
|
||||
"POST:/api/v2/workspaceagents/me/keys": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}/iceservers": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}/turn": {NoAuthorize: true},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}/derp": {NoAuthorize: true},
|
||||
|
||||
// These endpoints have more assertions. This is good, add more endpoints to assert if you can!
|
||||
"GET:/api/v2/organizations/{organization}": {AssertObject: rbac.ResourceOrganization.InOrg(admin.OrganizationID)},
|
||||
"GET:/api/v2/users/{user}/organizations": {StatusCode: http.StatusOK, AssertObject: rbac.ResourceOrganization},
|
||||
"GET:/api/v2/users/{user}/workspace/{workspacename}": {
|
||||
AssertObject: rbac.ResourceWorkspace,
|
||||
AssertAction: rbac.ActionRead,
|
||||
},
|
||||
"GET:/api/v2/users/me/workspace/{workspacename}/builds/{buildnumber}": {
|
||||
AssertObject: rbac.ResourceWorkspace,
|
||||
AssertAction: rbac.ActionRead,
|
||||
},
|
||||
"GET:/api/v2/workspaces/{workspace}/builds/{workspacebuildname}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspacebuilds/{workspacebuild}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspacebuilds/{workspacebuild}/logs": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaces/{workspace}/builds": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaces/{workspace}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"PUT:/api/v2/workspaces/{workspace}/autostart": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"PUT:/api/v2/workspaces/{workspace}/autostop": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaceresources/{workspaceresource}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"PATCH:/api/v2/workspacebuilds/{workspacebuild}/cancel": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspacebuilds/{workspacebuild}/resources": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspacebuilds/{workspacebuild}/state": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}/dial": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaceagents/{workspaceagent}/pty": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/workspaces/": {
|
||||
StatusCode: http.StatusOK,
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"GET:/api/v2/organizations/{organization}/templates": {
|
||||
StatusCode: http.StatusOK,
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"POST:/api/v2/organizations/{organization}/templates": {
|
||||
AssertAction: rbac.ActionCreate,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(organization.ID),
|
||||
},
|
||||
"DELETE:/api/v2/templates/{template}": {
|
||||
AssertAction: rbac.ActionDelete,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templates/{template}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"POST:/api/v2/files": {AssertAction: rbac.ActionCreate, AssertObject: rbac.ResourceFile},
|
||||
"GET:/api/v2/files/{fileHash}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceFile.WithOwner(admin.UserID.String()).WithID(file.Hash),
|
||||
},
|
||||
"GET:/api/v2/templates/{template}/versions": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"PATCH:/api/v2/templates/{template}/versions": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templates/{template}/versions/{templateversionname}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"PATCH:/api/v2/templateversions/{templateversion}/cancel": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/logs": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/parameters": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/resources": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/schema": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"POST:/api/v2/templateversions/{templateversion}/dry-run": {
|
||||
// The first check is to read the template
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(version.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/dry-run/{templateversiondryrun}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(version.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/dry-run/{templateversiondryrun}/resources": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(version.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/templateversions/{templateversion}/dry-run/{templateversiondryrun}/logs": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(version.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"PATCH:/api/v2/templateversions/{templateversion}/dry-run/{templateversiondryrun}/cancel": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(version.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/provisionerdaemons": {
|
||||
StatusCode: http.StatusOK,
|
||||
AssertObject: rbac.ResourceProvisionerDaemon.WithID(provisionerds[0].ID.String()),
|
||||
},
|
||||
|
||||
"POST:/api/v2/parameters/{scope}/{id}": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: rbac.ResourceTemplate.WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/parameters/{scope}/{id}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.WithID(template.ID.String()),
|
||||
},
|
||||
"DELETE:/api/v2/parameters/{scope}/{id}/{name}": {
|
||||
AssertAction: rbac.ActionUpdate,
|
||||
AssertObject: rbac.ResourceTemplate.WithID(template.ID.String()),
|
||||
},
|
||||
"GET:/api/v2/organizations/{organization}/templates/{templatename}": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: rbac.ResourceTemplate.InOrg(template.OrganizationID).WithID(template.ID.String()),
|
||||
},
|
||||
"POST:/api/v2/organizations/{organization}/workspaces": {
|
||||
AssertAction: rbac.ActionCreate,
|
||||
// No ID when creating
|
||||
AssertObject: workspaceRBACObj.WithID(""),
|
||||
},
|
||||
"GET:/api/v2/workspaces/{workspace}/watch": {
|
||||
AssertAction: rbac.ActionRead,
|
||||
AssertObject: workspaceRBACObj,
|
||||
},
|
||||
"POST:/api/v2/users/{user}/organizations": {
|
||||
AssertAction: rbac.ActionCreate,
|
||||
AssertObject: rbac.ResourceOrganization,
|
||||
},
|
||||
"GET:/api/v2/users": {StatusCode: http.StatusOK, AssertObject: rbac.ResourceUser},
|
||||
|
||||
// These endpoints need payloads to get to the auth part. Payloads will be required
|
||||
"PUT:/api/v2/users/{user}/roles": {StatusCode: http.StatusBadRequest, NoAuthorize: true},
|
||||
"PUT:/api/v2/organizations/{organization}/members/{user}/roles": {NoAuthorize: true},
|
||||
"POST:/api/v2/workspaces/{workspace}/builds": {StatusCode: http.StatusBadRequest, NoAuthorize: true},
|
||||
"POST:/api/v2/organizations/{organization}/templateversions": {StatusCode: http.StatusBadRequest, NoAuthorize: true},
|
||||
}
|
||||
|
||||
for k, v := range assertRoute {
|
||||
noTrailSlash := strings.TrimRight(k, "/")
|
||||
if _, ok := assertRoute[noTrailSlash]; ok && noTrailSlash != k {
|
||||
t.Errorf("route %q & %q is declared twice", noTrailSlash, k)
|
||||
t.FailNow()
|
||||
}
|
||||
assertRoute[noTrailSlash] = v
|
||||
}
|
||||
|
||||
for k, v := range skipRoutes {
|
||||
noTrailSlash := strings.TrimRight(k, "/")
|
||||
if _, ok := skipRoutes[noTrailSlash]; ok && noTrailSlash != k {
|
||||
t.Errorf("route %q & %q is declared twice", noTrailSlash, k)
|
||||
t.FailNow()
|
||||
}
|
||||
skipRoutes[noTrailSlash] = v
|
||||
}
|
||||
|
||||
err = chi.Walk(api.Handler, func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {
|
||||
name := method + ":" + route
|
||||
if _, ok := skipRoutes[strings.TrimRight(name, "/")]; ok {
|
||||
return nil
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
authorizer.reset()
|
||||
routeAssertions, ok := assertRoute[strings.TrimRight(name, "/")]
|
||||
if !ok {
|
||||
// By default, all omitted routes check for just "authorize" called
|
||||
routeAssertions = routeCheck{}
|
||||
}
|
||||
|
||||
// Replace all url params with known values
|
||||
route = strings.ReplaceAll(route, "{organization}", admin.OrganizationID.String())
|
||||
route = strings.ReplaceAll(route, "{user}", admin.UserID.String())
|
||||
route = strings.ReplaceAll(route, "{organizationname}", organization.Name)
|
||||
route = strings.ReplaceAll(route, "{workspace}", workspace.ID.String())
|
||||
route = strings.ReplaceAll(route, "{workspacebuild}", workspace.LatestBuild.ID.String())
|
||||
route = strings.ReplaceAll(route, "{workspacename}", workspace.Name)
|
||||
route = strings.ReplaceAll(route, "{workspacebuildname}", workspace.LatestBuild.Name)
|
||||
route = strings.ReplaceAll(route, "{workspaceagent}", workspaceResources[0].Agents[0].ID.String())
|
||||
route = strings.ReplaceAll(route, "{buildnumber}", strconv.FormatInt(int64(workspace.LatestBuild.BuildNumber), 10))
|
||||
route = strings.ReplaceAll(route, "{template}", template.ID.String())
|
||||
route = strings.ReplaceAll(route, "{hash}", file.Hash)
|
||||
route = strings.ReplaceAll(route, "{workspaceresource}", workspaceResources[0].ID.String())
|
||||
route = strings.ReplaceAll(route, "{workspaceapp}", workspaceResources[0].Agents[0].Apps[0].Name)
|
||||
route = strings.ReplaceAll(route, "{templateversion}", version.ID.String())
|
||||
route = strings.ReplaceAll(route, "{templateversiondryrun}", templateVersionDryRun.ID.String())
|
||||
route = strings.ReplaceAll(route, "{templatename}", template.Name)
|
||||
// Only checking template scoped params here
|
||||
route = strings.ReplaceAll(route, "{scope}", string(templateParam.Scope))
|
||||
route = strings.ReplaceAll(route, "{id}", templateParam.ScopeID.String())
|
||||
|
||||
resp, err := client.Request(context.Background(), method, route, nil)
|
||||
require.NoError(t, err, "do req")
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Logf("Response Body: %q", string(body))
|
||||
_ = resp.Body.Close()
|
||||
|
||||
if !routeAssertions.NoAuthorize {
|
||||
assert.NotNil(t, authorizer.Called, "authorizer expected")
|
||||
if routeAssertions.StatusCode != 0 {
|
||||
assert.Equal(t, routeAssertions.StatusCode, resp.StatusCode, "expect unauthorized")
|
||||
} else {
|
||||
// It's either a 404 or 403.
|
||||
if resp.StatusCode != http.StatusNotFound {
|
||||
assert.Equal(t, http.StatusForbidden, resp.StatusCode, "expect unauthorized")
|
||||
}
|
||||
}
|
||||
if authorizer.Called != nil {
|
||||
if routeAssertions.AssertAction != "" {
|
||||
assert.Equal(t, routeAssertions.AssertAction, authorizer.Called.Action, "resource action")
|
||||
}
|
||||
if routeAssertions.AssertObject.Type != "" {
|
||||
assert.Equal(t, routeAssertions.AssertObject.Type, authorizer.Called.Object.Type, "resource type")
|
||||
}
|
||||
if routeAssertions.AssertObject.Owner != "" {
|
||||
assert.Equal(t, routeAssertions.AssertObject.Owner, authorizer.Called.Object.Owner, "resource owner")
|
||||
}
|
||||
if routeAssertions.AssertObject.OrgID != "" {
|
||||
assert.Equal(t, routeAssertions.AssertObject.OrgID, authorizer.Called.Object.OrgID, "resource org")
|
||||
}
|
||||
if routeAssertions.AssertObject.ResourceID != "" {
|
||||
assert.Equal(t, routeAssertions.AssertObject.ResourceID, authorizer.Called.Object.ResourceID, "resource ID")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert.Nil(t, authorizer.Called, "authorize not expected")
|
||||
}
|
||||
})
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// authCall captures a single authorization request made against the fake
// authorizer, so tests can assert exactly what was checked.
type authCall struct {
	// SubjectID is the ID of the actor the authorization was performed for.
	SubjectID string
	// Roles are the role names the subject held at check time.
	Roles []string
	// Action is the RBAC action requested (read/create/update/delete).
	Action rbac.Action
	// Object is the RBAC object the action targeted.
	Object rbac.Object
}
|
||||
|
||||
// fakeAuthorizer is a test double that records the most recent authorization
// call and returns a fixed error for every check.
type fakeAuthorizer struct {
	// Called holds the last recorded authorization request; nil when no
	// authorization has happened since the last reset.
	Called *authCall
	// AlwaysReturn is the error returned by every ByRoleName call.
	AlwaysReturn error
}
|
||||
|
||||
func (f *fakeAuthorizer) ByRoleName(_ context.Context, subjectID string, roleNames []string, action rbac.Action, object rbac.Object) error {
|
||||
f.Called = &authCall{
|
||||
SubjectID: subjectID,
|
||||
Roles: roleNames,
|
||||
Action: action,
|
||||
Object: object,
|
||||
}
|
||||
return f.AlwaysReturn
|
||||
}
|
||||
|
||||
func (f *fakeAuthorizer) reset() {
|
||||
f.Called = nil
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
a := coderdtest.NewAuthTester(ctx, t, nil)
|
||||
skipRoutes, assertRoute := coderdtest.AGPLRoutes(a)
|
||||
a.Test(ctx, assertRoute, skipRoutes)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,562 @@
|
||||
package coderdtest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/coderd"
|
||||
"github.com/coder/coder/coderd/rbac"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/provisionersdk/proto"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
// RouteCheck describes the expected authorization behavior for a single
// method+route pair when it is exercised by AuthTester.Test.
type RouteCheck struct {
	// NoAuthorize, when true, asserts the authorizer is never invoked for
	// the route (unauthenticated endpoints, endpoints with their own auth,
	// or routes that fail before reaching authorization).
	NoAuthorize bool
	// AssertAction, when non-empty, must equal the RBAC action passed to
	// the authorizer.
	AssertAction rbac.Action
	// AssertObject's non-zero fields (Type, Owner, OrgID) must each match
	// the corresponding field of the RBAC object passed to the authorizer.
	AssertObject rbac.Object
	// StatusCode, when non-zero, is the exact HTTP status expected;
	// otherwise the test accepts a 403 (or a 404).
	StatusCode int
}
|
||||
|
||||
// AuthTester bundles a test coderd API whose Authorizer is a
// recordingAuthorizer together with pre-created fixture data. The exported
// fixture fields are used to substitute URL path parameters when walking
// every registered route.
type AuthTester struct {
	t          *testing.T
	api        *coderd.API
	authorizer *recordingAuthorizer

	// Client is authenticated as the first (admin) user.
	Client *codersdk.Client
	// Workspace and the fields below are fixtures created by NewAuthTester.
	Workspace             codersdk.Workspace
	Organization          codersdk.Organization
	Admin                 codersdk.CreateFirstUserResponse
	Template              codersdk.Template
	Version               codersdk.TemplateVersion
	WorkspaceResource     codersdk.WorkspaceResource
	File                  codersdk.UploadResponse
	TemplateVersionDryRun codersdk.ProvisionerJob
	TemplateParam         codersdk.Parameter
	// URLParams maps route placeholders (e.g. "{workspace}") to concrete
	// fixture values substituted into each walked route.
	URLParams map[string]string
}
|
||||
|
||||
// NewAuthTester starts an in-memory coderd with a recording authorizer and an
// in-memory provisioner, then seeds it with one of each major resource
// (organization, template version, template, workspace, file, dry-run job,
// template parameter) so that every route's URL parameters can be filled in.
// It fails the test if options already carry a custom Authorizer, since the
// recording authorizer must be the one installed.
func NewAuthTester(ctx context.Context, t *testing.T, options *Options) *AuthTester {
	authorizer := &recordingAuthorizer{}
	if options == nil {
		options = &Options{}
	}
	if options.Authorizer != nil {
		t.Error("NewAuthTester cannot be called with custom Authorizer")
	}
	options.Authorizer = authorizer
	// A provisioner is required so template/workspace builds can complete.
	options.IncludeProvisionerD = true

	client, _, api := newWithAPI(t, options)
	admin := CreateFirstUser(t, client)
	// The provisioner will call to coderd and register itself. This is async,
	// so we wait for it to occur.
	require.Eventually(t, func() bool {
		provisionerds, err := client.ProvisionerDaemons(ctx)
		return assert.NoError(t, err) && len(provisionerds) > 0
	}, testutil.WaitLong, testutil.IntervalSlow)

	provisionerds, err := client.ProvisionerDaemons(ctx)
	require.NoError(t, err, "fetch provisioners")
	require.Len(t, provisionerds, 1)

	organization, err := client.Organization(ctx, admin.OrganizationID)
	require.NoError(t, err, "fetch org")

	// Setup some data in the database.
	version := CreateTemplateVersion(t, client, admin.OrganizationID, &echo.Responses{
		Parse: echo.ParseComplete,
		Provision: []*proto.Provision_Response{{
			Type: &proto.Provision_Response_Complete{
				Complete: &proto.Provision_Complete{
					// Return a workspace resource with an agent and an app so
					// the agent/app-scoped routes have fixtures to target.
					Resources: []*proto.Resource{{
						Name: "some",
						Type: "example",
						Agents: []*proto.Agent{{
							Name: "agent",
							Id:   "something",
							Auth: &proto.Agent_Token{},
							Apps: []*proto.App{{
								Name: "testapp",
								Url:  "http://localhost:3000",
							}},
						}},
					}},
				},
			},
		}},
	})
	AwaitTemplateVersionJob(t, client, version.ID)
	template := CreateTemplate(t, client, admin.OrganizationID, version.ID)
	workspace := CreateWorkspace(t, client, admin.OrganizationID, template.ID)
	AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
	file, err := client.Upload(ctx, codersdk.ContentTypeTar, make([]byte, 1024))
	require.NoError(t, err, "upload file")
	workspaceResources, err := client.WorkspaceResourcesByBuild(ctx, workspace.LatestBuild.ID)
	require.NoError(t, err, "workspace resources")
	templateVersionDryRun, err := client.CreateTemplateVersionDryRun(ctx, version.ID, codersdk.CreateTemplateVersionDryRunRequest{
		ParameterValues: []codersdk.CreateParameterRequest{},
	})
	require.NoError(t, err, "template version dry-run")

	templateParam, err := client.CreateParameter(ctx, codersdk.ParameterTemplate, template.ID, codersdk.CreateParameterRequest{
		Name:              "test-param",
		SourceValue:       "hello world",
		SourceScheme:      codersdk.ParameterSourceSchemeData,
		DestinationScheme: codersdk.ParameterDestinationSchemeProvisionerVariable,
	})
	require.NoError(t, err, "create template param")

	// Placeholder -> concrete value substitutions applied to every walked
	// route before it is requested.
	urlParameters := map[string]string{
		"{organization}":        admin.OrganizationID.String(),
		"{user}":                admin.UserID.String(),
		"{organizationname}":    organization.Name,
		"{workspace}":           workspace.ID.String(),
		"{workspacebuild}":      workspace.LatestBuild.ID.String(),
		"{workspacename}":       workspace.Name,
		"{workspacebuildname}":  workspace.LatestBuild.Name,
		"{workspaceagent}":      workspaceResources[0].Agents[0].ID.String(),
		"{buildnumber}":         strconv.FormatInt(int64(workspace.LatestBuild.BuildNumber), 10),
		"{template}":            template.ID.String(),
		"{hash}":                file.Hash,
		"{workspaceresource}":   workspaceResources[0].ID.String(),
		"{workspaceapp}":        workspaceResources[0].Agents[0].Apps[0].Name,
		"{templateversion}":     version.ID.String(),
		"{jobID}":               templateVersionDryRun.ID.String(),
		"{templatename}":        template.Name,
		"{workspace_and_agent}": workspace.Name + "." + workspaceResources[0].Agents[0].Name,
		// Only checking template scoped params here
		"parameters/{scope}/{id}": fmt.Sprintf("parameters/%s/%s",
			string(templateParam.Scope), templateParam.ScopeID.String()),
	}

	return &AuthTester{
		t:                     t,
		api:                   api,
		authorizer:            authorizer,
		Client:                client,
		Workspace:             workspace,
		Organization:          organization,
		Admin:                 admin,
		Template:              template,
		Version:               version,
		WorkspaceResource:     workspaceResources[0],
		File:                  file,
		TemplateVersionDryRun: templateVersionDryRun,
		TemplateParam:         templateParam,
		URLParams:             urlParameters,
	}
}
|
||||
|
||||
// AGPLRoutes returns the per-route authorization expectations for the AGPL
// (open-source) API surface. The first return value maps routes to a reason
// for skipping them entirely; the second maps "METHOD:/path" keys to the
// RouteCheck asserted by AuthTester.Test.
func AGPLRoutes(a *AuthTester) (map[string]string, map[string]RouteCheck) {
	// Some quick reused objects
	workspaceRBACObj := rbac.ResourceWorkspace.InOrg(a.Organization.ID).WithOwner(a.Workspace.OwnerID.String())
	workspaceExecObj := rbac.ResourceWorkspaceExecution.InOrg(a.Organization.ID).WithOwner(a.Workspace.OwnerID.String())
	// skipRoutes allows skipping routes from being checked.
	skipRoutes := map[string]string{
		"POST:/api/v2/users/logout": "Logging out deletes the API Key for other routes",
	}

	assertRoute := map[string]RouteCheck{
		// These endpoints do not require auth
		"GET:/api/v2":                   {NoAuthorize: true},
		"GET:/api/v2/buildinfo":         {NoAuthorize: true},
		"GET:/api/v2/users/first":       {NoAuthorize: true},
		"POST:/api/v2/users/first":      {NoAuthorize: true},
		"POST:/api/v2/users/login":      {NoAuthorize: true},
		"GET:/api/v2/users/authmethods": {NoAuthorize: true},
		"POST:/api/v2/csp/reports":      {NoAuthorize: true},
		"GET:/api/v2/entitlements":      {NoAuthorize: true},

		// Has its own auth
		"GET:/api/v2/users/oauth2/github/callback": {NoAuthorize: true},
		"GET:/api/v2/users/oidc/callback":          {NoAuthorize: true},

		// All workspaceagents endpoints do not use rbac
		"POST:/api/v2/workspaceagents/aws-instance-identity":      {NoAuthorize: true},
		"POST:/api/v2/workspaceagents/azure-instance-identity":    {NoAuthorize: true},
		"POST:/api/v2/workspaceagents/google-instance-identity":   {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/gitsshkey":                {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/iceservers":               {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/listen":                   {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/metadata":                 {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/turn":                     {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/derp":                     {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/me/wireguardlisten":          {NoAuthorize: true},
		"POST:/api/v2/workspaceagents/me/keys":                    {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/{workspaceagent}/iceservers": {NoAuthorize: true},
		"GET:/api/v2/workspaceagents/{workspaceagent}/derp":       {NoAuthorize: true},

		// These endpoints have more assertions. This is good, add more endpoints to assert if you can!
		"GET:/api/v2/organizations/{organization}": {AssertObject: rbac.ResourceOrganization.InOrg(a.Admin.OrganizationID)},
		"GET:/api/v2/users/{user}/organizations":   {StatusCode: http.StatusOK, AssertObject: rbac.ResourceOrganization},
		"GET:/api/v2/users/{user}/workspace/{workspacename}": {
			AssertObject: rbac.ResourceWorkspace,
			AssertAction: rbac.ActionRead,
		},
		"GET:/api/v2/users/{user}/workspace/{workspacename}/builds/{buildnumber}": {
			AssertObject: rbac.ResourceWorkspace,
			AssertAction: rbac.ActionRead,
		},
		"GET:/api/v2/workspaces/{workspace}/builds/{workspacebuildname}": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspacebuilds/{workspacebuild}": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspacebuilds/{workspacebuild}/logs": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspaces/{workspace}/builds": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspaces/{workspace}": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"PUT:/api/v2/workspaces/{workspace}/autostart": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: workspaceRBACObj,
		},
		"PUT:/api/v2/workspaces/{workspace}/ttl": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspaceresources/{workspaceresource}": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"PATCH:/api/v2/workspacebuilds/{workspacebuild}/cancel": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspacebuilds/{workspacebuild}/resources": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspacebuilds/{workspacebuild}/state": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspaceagents/{workspaceagent}": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		// Dial/turn/pty require workspace-execution permission, not plain read.
		"GET:/api/v2/workspaceagents/{workspaceagent}/dial": {
			AssertAction: rbac.ActionCreate,
			AssertObject: workspaceExecObj,
		},
		"GET:/api/v2/workspaceagents/{workspaceagent}/turn": {
			AssertAction: rbac.ActionCreate,
			AssertObject: workspaceExecObj,
		},
		"GET:/api/v2/workspaceagents/{workspaceagent}/pty": {
			AssertAction: rbac.ActionCreate,
			AssertObject: workspaceExecObj,
		},
		"GET:/api/v2/workspaces/": {
			StatusCode:   http.StatusOK,
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/organizations/{organization}/templates": {
			StatusCode:   http.StatusOK,
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"POST:/api/v2/organizations/{organization}/templates": {
			AssertAction: rbac.ActionCreate,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Organization.ID),
		},
		"DELETE:/api/v2/templates/{template}": {
			AssertAction: rbac.ActionDelete,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templates/{template}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"POST:/api/v2/files": {AssertAction: rbac.ActionCreate, AssertObject: rbac.ResourceFile},
		"GET:/api/v2/files/{hash}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceFile.WithOwner(a.Admin.UserID.String()),
		},
		"GET:/api/v2/templates/{template}/versions": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"PATCH:/api/v2/templates/{template}/versions": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templates/{template}/versions/{templateversionname}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"PATCH:/api/v2/templateversions/{templateversion}/cancel": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/logs": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/parameters": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/resources": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/schema": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"POST:/api/v2/templateversions/{templateversion}/dry-run": {
			// The first check is to read the template
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Version.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/dry-run/{jobID}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Version.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/dry-run/{jobID}/resources": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Version.OrganizationID),
		},
		"GET:/api/v2/templateversions/{templateversion}/dry-run/{jobID}/logs": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Version.OrganizationID),
		},
		"PATCH:/api/v2/templateversions/{templateversion}/dry-run/{jobID}/cancel": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Version.OrganizationID),
		},
		"GET:/api/v2/provisionerdaemons": {
			StatusCode:   http.StatusOK,
			AssertObject: rbac.ResourceProvisionerDaemon,
		},

		"POST:/api/v2/parameters/{scope}/{id}": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: rbac.ResourceTemplate,
		},
		"GET:/api/v2/parameters/{scope}/{id}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate,
		},
		"DELETE:/api/v2/parameters/{scope}/{id}/{name}": {
			AssertAction: rbac.ActionUpdate,
			AssertObject: rbac.ResourceTemplate,
		},
		"GET:/api/v2/organizations/{organization}/templates/{templatename}": {
			AssertAction: rbac.ActionRead,
			AssertObject: rbac.ResourceTemplate.InOrg(a.Template.OrganizationID),
		},
		"POST:/api/v2/organizations/{organization}/workspaces": {
			AssertAction: rbac.ActionCreate,
			// No ID when creating
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/workspaces/{workspace}/watch": {
			AssertAction: rbac.ActionRead,
			AssertObject: workspaceRBACObj,
		},
		"GET:/api/v2/users": {StatusCode: http.StatusOK, AssertObject: rbac.ResourceUser},

		// These endpoints need payloads to get to the auth part. Payloads will be required
		"PUT:/api/v2/users/{user}/roles":                                {StatusCode: http.StatusBadRequest, NoAuthorize: true},
		"PUT:/api/v2/organizations/{organization}/members/{user}/roles": {NoAuthorize: true},
		"POST:/api/v2/workspaces/{workspace}/builds":                    {StatusCode: http.StatusBadRequest, NoAuthorize: true},
		"POST:/api/v2/organizations/{organization}/templateversions":    {StatusCode: http.StatusBadRequest, NoAuthorize: true},
	}

	// Routes like proxy routes support all HTTP methods. A helper func to expand
	// 1 url to all http methods.
	assertAllHTTPMethods := func(url string, check RouteCheck) {
		methods := []string{http.MethodGet, http.MethodHead, http.MethodPost,
			http.MethodPut, http.MethodPatch, http.MethodDelete,
			http.MethodConnect, http.MethodOptions, http.MethodTrace}

		for _, method := range methods {
			route := method + ":" + url
			assertRoute[route] = check
		}
	}

	// The app-proxy path is registered both URL-encoded (%40) and literal (@).
	assertAllHTTPMethods("/%40{user}/{workspace_and_agent}/apps/{workspaceapp}/*", RouteCheck{
		AssertAction: rbac.ActionCreate,
		AssertObject: workspaceExecObj,
	})
	assertAllHTTPMethods("/@{user}/{workspace_and_agent}/apps/{workspaceapp}/*", RouteCheck{
		AssertAction: rbac.ActionCreate,
		AssertObject: workspaceExecObj,
	})

	return skipRoutes, assertRoute
}
|
||||
|
||||
// Test walks every route registered on the API, issues a real request with
// URL placeholders substituted from a.URLParams, and verifies the recorded
// authorization call against the matching RouteCheck. Routes listed in
// skipRoutes are ignored. It also fails if any asserted route was never
// walked, catching stale entries in assertRoute.
func (a *AuthTester) Test(ctx context.Context, assertRoute map[string]RouteCheck, skipRoutes map[string]string) {
	// Always fail auth from this point forward
	a.authorizer.AlwaysReturn = rbac.ForbiddenWithInternal(xerrors.New("fake implementation"), nil, nil)

	// Normalize keys to have no trailing slash, erroring on duplicates that
	// differ only by a trailing slash. routeMissing tracks which asserted
	// routes are actually visited by the walk below.
	routeMissing := make(map[string]bool)
	for k, v := range assertRoute {
		noTrailSlash := strings.TrimRight(k, "/")
		if _, ok := assertRoute[noTrailSlash]; ok && noTrailSlash != k {
			a.t.Errorf("route %q & %q is declared twice", noTrailSlash, k)
			a.t.FailNow()
		}
		assertRoute[noTrailSlash] = v
		routeMissing[noTrailSlash] = true
	}

	for k, v := range skipRoutes {
		noTrailSlash := strings.TrimRight(k, "/")
		if _, ok := skipRoutes[noTrailSlash]; ok && noTrailSlash != k {
			a.t.Errorf("route %q & %q is declared twice", noTrailSlash, k)
			a.t.FailNow()
		}
		skipRoutes[noTrailSlash] = v
	}

	err := chi.Walk(
		a.api.Handler,
		func(
			method string,
			route string,
			handler http.Handler,
			middlewares ...func(http.Handler) http.Handler,
		) error {
			// work around chi's bugged handling of /*/*/ which can occur if we
			// r.Mount("/", someHandler()) in our tree
			for strings.Contains(route, "/*/") {
				route = strings.Replace(route, "/*/", "/", -1)
			}
			name := method + ":" + route
			if _, ok := skipRoutes[strings.TrimRight(name, "/")]; ok {
				return nil
			}
			a.t.Run(name, func(t *testing.T) {
				a.authorizer.reset()
				routeKey := strings.TrimRight(name, "/")

				routeAssertions, ok := assertRoute[routeKey]
				if !ok {
					// By default, all omitted routes check for just "authorize" called
					routeAssertions = RouteCheck{}
				}
				delete(routeMissing, routeKey)

				// Replace all url params with known values
				for k, v := range a.URLParams {
					route = strings.ReplaceAll(route, k, v)
				}

				resp, err := a.Client.Request(ctx, method, route, nil)
				require.NoError(t, err, "do req")
				body, _ := io.ReadAll(resp.Body)
				t.Logf("Response Body: %q", string(body))
				_ = resp.Body.Close()

				if !routeAssertions.NoAuthorize {
					assert.NotNil(t, a.authorizer.Called, "authorizer expected")
					if routeAssertions.StatusCode != 0 {
						assert.Equal(t, routeAssertions.StatusCode, resp.StatusCode, "expect unauthorized")
					} else {
						// It's either a 404 or 403.
						if resp.StatusCode != http.StatusNotFound {
							assert.Equal(t, http.StatusForbidden, resp.StatusCode, "expect unauthorized")
						}
					}
					if a.authorizer.Called != nil {
						if routeAssertions.AssertAction != "" {
							assert.Equal(t, routeAssertions.AssertAction, a.authorizer.Called.Action, "resource action")
						}
						// Only the non-zero fields of the expected object are
						// compared, so checks can be as loose or tight as needed.
						if routeAssertions.AssertObject.Type != "" {
							assert.Equal(t, routeAssertions.AssertObject.Type, a.authorizer.Called.Object.Type, "resource type")
						}
						if routeAssertions.AssertObject.Owner != "" {
							assert.Equal(t, routeAssertions.AssertObject.Owner, a.authorizer.Called.Object.Owner, "resource owner")
						}
						if routeAssertions.AssertObject.OrgID != "" {
							assert.Equal(t, routeAssertions.AssertObject.OrgID, a.authorizer.Called.Object.OrgID, "resource org")
						}
					}
				} else {
					assert.Nil(t, a.authorizer.Called, "authorize not expected")
				}
			})
			return nil
		})
	require.NoError(a.t, err)
	require.Len(a.t, routeMissing, 0, "didn't walk some asserted routes: %v", routeMissing)
}
|
||||
|
||||
// authCall captures a single authorization request made against the
// recording authorizer, so tests can assert exactly what was checked.
type authCall struct {
	// SubjectID is the ID of the actor the authorization was performed for.
	SubjectID string
	// Roles are the role names the subject held at check time.
	Roles []string
	// Action is the RBAC action requested.
	Action rbac.Action
	// Object is the RBAC object the action targeted.
	Object rbac.Object
}
|
||||
|
||||
// recordingAuthorizer records the most recent authorization request and
// returns a fixed error for every check, so tests can both force a
// deny/allow outcome and inspect what was asked.
type recordingAuthorizer struct {
	// Called is the last recorded request; nil until an authorization
	// happens or after reset.
	Called *authCall
	// AlwaysReturn is the error returned by every ByRoleName call.
	AlwaysReturn error
}
|
||||
|
||||
func (r *recordingAuthorizer) ByRoleName(_ context.Context, subjectID string, roleNames []string, action rbac.Action, object rbac.Object) error {
|
||||
r.Called = &authCall{
|
||||
SubjectID: subjectID,
|
||||
Roles: roleNames,
|
||||
Action: action,
|
||||
Object: object,
|
||||
}
|
||||
return r.AlwaysReturn
|
||||
}
|
||||
|
||||
// PrepareByRoleName returns a prepared authorizer whose Authorize calls are
// routed back through this recordingAuthorizer, so prepared (batched)
// authorization is recorded the same way as direct ByRoleName calls.
// It never returns an error.
func (r *recordingAuthorizer) PrepareByRoleName(_ context.Context, subjectID string, roles []string, action rbac.Action, _ string) (rbac.PreparedAuthorized, error) {
	return &fakePreparedAuthorizer{
		Original:  r,
		SubjectID: subjectID,
		Roles:     roles,
		Action:    action,
	}, nil
}
|
||||
|
||||
// reset clears the recorded call so the next route's authorization can be
// asserted in isolation.
func (r *recordingAuthorizer) reset() {
	r.Called = nil
}
|
||||
|
||||
// fakePreparedAuthorizer is the rbac.PreparedAuthorized returned by
// recordingAuthorizer.PrepareByRoleName. It freezes the subject, roles, and
// action, and delegates each Authorize call back to the original recorder.
type fakePreparedAuthorizer struct {
	// Original is the recorder that created this prepared authorizer.
	Original *recordingAuthorizer
	// SubjectID, Roles, and Action were fixed at preparation time.
	SubjectID string
	Roles     []string
	Action    rbac.Action
}
|
||||
|
||||
// Authorize checks the given object using the subject, roles, and action
// captured at preparation time, delegating to (and thereby recording in)
// the original recordingAuthorizer.
func (f *fakePreparedAuthorizer) Authorize(ctx context.Context, object rbac.Object) error {
	return f.Original.ByRoleName(ctx, f.SubjectID, f.Roles, f.Action, object)
}
|
||||
@@ -68,11 +68,13 @@ type Options struct {
|
||||
GoogleTokenValidator *idtoken.Validator
|
||||
SSHKeygenAlgorithm gitsshkey.Algorithm
|
||||
APIRateLimit int
|
||||
AutoImportTemplates []coderd.AutoImportTemplate
|
||||
AutobuildTicker <-chan time.Time
|
||||
AutobuildStats chan<- executor.Stats
|
||||
|
||||
// IncludeProvisionerD when true means to start an in-memory provisionerD
|
||||
IncludeProvisionerD bool
|
||||
APIBuilder func(*coderd.Options) *coderd.API
|
||||
}
|
||||
|
||||
// New constructs a codersdk client connected to an in-memory API instance.
|
||||
@@ -102,6 +104,14 @@ func NewWithProvisionerCloser(t *testing.T, options *Options) (*codersdk.Client,
|
||||
// and is a temporary measure while the API to register provisioners is ironed
|
||||
// out.
|
||||
func newWithCloser(t *testing.T, options *Options) (*codersdk.Client, io.Closer) {
|
||||
client, closer, _ := newWithAPI(t, options)
|
||||
return client, closer
|
||||
}
|
||||
|
||||
// newWithAPI constructs an in-memory API instance and returns a client to talk to it.
|
||||
// Most tests never need a reference to the API, but AuthorizationTest in this module uses it.
|
||||
// Do not expose the API or wrath shall descend upon thee.
|
||||
func newWithAPI(t *testing.T, options *Options) (*codersdk.Client, io.Closer, *coderd.API) {
|
||||
if options == nil {
|
||||
options = &Options{}
|
||||
}
|
||||
@@ -122,6 +132,9 @@ func newWithCloser(t *testing.T, options *Options) (*codersdk.Client, io.Closer)
|
||||
close(options.AutobuildStats)
|
||||
})
|
||||
}
|
||||
if options.APIBuilder == nil {
|
||||
options.APIBuilder = coderd.New
|
||||
}
|
||||
|
||||
// This can be hotswapped for a live database instance.
|
||||
db := databasefake.New()
|
||||
@@ -177,7 +190,7 @@ func newWithCloser(t *testing.T, options *Options) (*codersdk.Client, io.Closer)
|
||||
})
|
||||
|
||||
// We set the handler after server creation for the access URL.
|
||||
coderAPI := coderd.New(&coderd.Options{
|
||||
coderAPI := options.APIBuilder(&coderd.Options{
|
||||
AgentConnectionUpdateFrequency: 150 * time.Millisecond,
|
||||
// Force a long disconnection timeout to ensure
|
||||
// agents are not marked as disconnected during slow tests.
|
||||
@@ -198,6 +211,7 @@ func newWithCloser(t *testing.T, options *Options) (*codersdk.Client, io.Closer)
|
||||
APIRateLimit: options.APIRateLimit,
|
||||
Authorizer: options.Authorizer,
|
||||
Telemetry: telemetry.NewNoop(),
|
||||
AutoImportTemplates: options.AutoImportTemplates,
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
_ = coderAPI.Close()
|
||||
@@ -212,7 +226,7 @@ func newWithCloser(t *testing.T, options *Options) (*codersdk.Client, io.Closer)
|
||||
_ = provisionerCloser.Close()
|
||||
})
|
||||
|
||||
return codersdk.New(serverURL), provisionerCloser
|
||||
return codersdk.New(serverURL), provisionerCloser, coderAPI
|
||||
}
|
||||
|
||||
// NewProvisionerDaemon launches a provisionerd instance configured to work
|
||||
@@ -275,10 +289,15 @@ func CreateFirstUser(t *testing.T, client *codersdk.Client) codersdk.CreateFirst
|
||||
|
||||
// CreateAnotherUser creates and authenticates a new user.
|
||||
func CreateAnotherUser(t *testing.T, client *codersdk.Client, organizationID uuid.UUID, roles ...string) *codersdk.Client {
|
||||
userClient, _ := createAnotherUserRetry(t, client, organizationID, 5, roles...)
|
||||
return userClient
|
||||
}
|
||||
|
||||
func CreateAnotherUserWithUser(t *testing.T, client *codersdk.Client, organizationID uuid.UUID, roles ...string) (*codersdk.Client, codersdk.User) {
|
||||
return createAnotherUserRetry(t, client, organizationID, 5, roles...)
|
||||
}
|
||||
|
||||
func createAnotherUserRetry(t *testing.T, client *codersdk.Client, organizationID uuid.UUID, retries int, roles ...string) *codersdk.Client {
|
||||
func createAnotherUserRetry(t *testing.T, client *codersdk.Client, organizationID uuid.UUID, retries int, roles ...string) (*codersdk.Client, codersdk.User) {
|
||||
req := codersdk.CreateUserRequest{
|
||||
Email: namesgenerator.GetRandomName(10) + "@coder.com",
|
||||
Username: randomUsername(),
|
||||
@@ -337,7 +356,7 @@ func createAnotherUserRetry(t *testing.T, client *codersdk.Client, organizationI
|
||||
require.NoError(t, err, "update org membership roles")
|
||||
}
|
||||
}
|
||||
return other
|
||||
return other, user
|
||||
}
|
||||
|
||||
// CreateTemplateVersion creates a template import provisioner job
|
||||
@@ -411,11 +430,11 @@ func AwaitTemplateVersionJob(t *testing.T, client *codersdk.Client, version uuid
|
||||
|
||||
t.Logf("waiting for template version job %s", version)
|
||||
var templateVersion codersdk.TemplateVersion
|
||||
require.Eventually(t, func() bool {
|
||||
require.True(t, testutil.EventuallyShort(t, func(ctx context.Context) bool {
|
||||
var err error
|
||||
templateVersion, err = client.TemplateVersion(context.Background(), version)
|
||||
templateVersion, err = client.TemplateVersion(ctx, version)
|
||||
return assert.NoError(t, err) && templateVersion.Job.CompletedAt != nil
|
||||
}, testutil.WaitShort, testutil.IntervalFast)
|
||||
}))
|
||||
return templateVersion
|
||||
}
|
||||
|
||||
@@ -425,11 +444,10 @@ func AwaitWorkspaceBuildJob(t *testing.T, client *codersdk.Client, build uuid.UU
|
||||
|
||||
t.Logf("waiting for workspace build job %s", build)
|
||||
var workspaceBuild codersdk.WorkspaceBuild
|
||||
require.Eventually(t, func() bool {
|
||||
var err error
|
||||
workspaceBuild, err = client.WorkspaceBuild(context.Background(), build)
|
||||
require.True(t, testutil.EventuallyShort(t, func(ctx context.Context) bool {
|
||||
workspaceBuild, err := client.WorkspaceBuild(ctx, build)
|
||||
return assert.NoError(t, err) && workspaceBuild.Job.CompletedAt != nil
|
||||
}, testutil.WaitShort, testutil.IntervalFast)
|
||||
}))
|
||||
return workspaceBuild
|
||||
}
|
||||
|
||||
@@ -439,21 +457,22 @@ func AwaitWorkspaceAgents(t *testing.T, client *codersdk.Client, build uuid.UUID
|
||||
|
||||
t.Logf("waiting for workspace agents (build %s)", build)
|
||||
var resources []codersdk.WorkspaceResource
|
||||
require.Eventually(t, func() bool {
|
||||
require.True(t, testutil.EventuallyLong(t, func(ctx context.Context) bool {
|
||||
var err error
|
||||
resources, err = client.WorkspaceResourcesByBuild(context.Background(), build)
|
||||
resources, err = client.WorkspaceResourcesByBuild(ctx, build)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
for _, resource := range resources {
|
||||
for _, agent := range resource.Agents {
|
||||
if agent.Status != codersdk.WorkspaceAgentConnected {
|
||||
t.Logf("agent %s not connected yet", agent.Name)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, testutil.WaitLong, testutil.IntervalMedium)
|
||||
}))
|
||||
return resources
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestNew(t *testing.T) {
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
_ = coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/lib/pq"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/coder/coder/coderd/database"
|
||||
@@ -26,21 +27,23 @@ func New() database.Store {
|
||||
organizations: make([]database.Organization, 0),
|
||||
users: make([]database.User, 0),
|
||||
|
||||
auditLogs: make([]database.AuditLog, 0),
|
||||
files: make([]database.File, 0),
|
||||
gitSSHKey: make([]database.GitSSHKey, 0),
|
||||
parameterSchemas: make([]database.ParameterSchema, 0),
|
||||
parameterValues: make([]database.ParameterValue, 0),
|
||||
provisionerDaemons: make([]database.ProvisionerDaemon, 0),
|
||||
provisionerJobAgents: make([]database.WorkspaceAgent, 0),
|
||||
provisionerJobLogs: make([]database.ProvisionerJobLog, 0),
|
||||
provisionerJobResources: make([]database.WorkspaceResource, 0),
|
||||
provisionerJobs: make([]database.ProvisionerJob, 0),
|
||||
templateVersions: make([]database.TemplateVersion, 0),
|
||||
templates: make([]database.Template, 0),
|
||||
workspaceBuilds: make([]database.WorkspaceBuild, 0),
|
||||
workspaceApps: make([]database.WorkspaceApp, 0),
|
||||
workspaces: make([]database.Workspace, 0),
|
||||
auditLogs: make([]database.AuditLog, 0),
|
||||
files: make([]database.File, 0),
|
||||
gitSSHKey: make([]database.GitSSHKey, 0),
|
||||
parameterSchemas: make([]database.ParameterSchema, 0),
|
||||
parameterValues: make([]database.ParameterValue, 0),
|
||||
provisionerDaemons: make([]database.ProvisionerDaemon, 0),
|
||||
provisionerJobAgents: make([]database.WorkspaceAgent, 0),
|
||||
provisionerJobLogs: make([]database.ProvisionerJobLog, 0),
|
||||
provisionerJobResources: make([]database.WorkspaceResource, 0),
|
||||
provisionerJobResourceMetadata: make([]database.WorkspaceResourceMetadatum, 0),
|
||||
provisionerJobs: make([]database.ProvisionerJob, 0),
|
||||
templateVersions: make([]database.TemplateVersion, 0),
|
||||
templates: make([]database.Template, 0),
|
||||
workspaceBuilds: make([]database.WorkspaceBuild, 0),
|
||||
workspaceApps: make([]database.WorkspaceApp, 0),
|
||||
workspaces: make([]database.Workspace, 0),
|
||||
licenses: make([]database.License, 0),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -72,25 +75,29 @@ type data struct {
|
||||
organizations []database.Organization
|
||||
organizationMembers []database.OrganizationMember
|
||||
users []database.User
|
||||
userLinks []database.UserLink
|
||||
|
||||
// New tables
|
||||
auditLogs []database.AuditLog
|
||||
files []database.File
|
||||
gitSSHKey []database.GitSSHKey
|
||||
parameterSchemas []database.ParameterSchema
|
||||
parameterValues []database.ParameterValue
|
||||
provisionerDaemons []database.ProvisionerDaemon
|
||||
provisionerJobAgents []database.WorkspaceAgent
|
||||
provisionerJobLogs []database.ProvisionerJobLog
|
||||
provisionerJobResources []database.WorkspaceResource
|
||||
provisionerJobs []database.ProvisionerJob
|
||||
templateVersions []database.TemplateVersion
|
||||
templates []database.Template
|
||||
workspaceBuilds []database.WorkspaceBuild
|
||||
workspaceApps []database.WorkspaceApp
|
||||
workspaces []database.Workspace
|
||||
auditLogs []database.AuditLog
|
||||
files []database.File
|
||||
gitSSHKey []database.GitSSHKey
|
||||
parameterSchemas []database.ParameterSchema
|
||||
parameterValues []database.ParameterValue
|
||||
provisionerDaemons []database.ProvisionerDaemon
|
||||
provisionerJobAgents []database.WorkspaceAgent
|
||||
provisionerJobLogs []database.ProvisionerJobLog
|
||||
provisionerJobResources []database.WorkspaceResource
|
||||
provisionerJobResourceMetadata []database.WorkspaceResourceMetadatum
|
||||
provisionerJobs []database.ProvisionerJob
|
||||
templateVersions []database.TemplateVersion
|
||||
templates []database.Template
|
||||
workspaceBuilds []database.WorkspaceBuild
|
||||
workspaceApps []database.WorkspaceApp
|
||||
workspaces []database.Workspace
|
||||
licenses []database.License
|
||||
|
||||
deploymentID string
|
||||
deploymentID string
|
||||
lastLicenseID int32
|
||||
}
|
||||
|
||||
// InTx doesn't rollback data properly for in-memory yet.
|
||||
@@ -585,19 +592,45 @@ func (q *fakeQuerier) GetLatestWorkspaceBuildByWorkspaceID(_ context.Context, wo
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
var row database.WorkspaceBuild
|
||||
var buildNum int32
|
||||
var buildNum int32 = -1
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
if workspaceBuild.WorkspaceID.String() == workspaceID.String() && workspaceBuild.BuildNumber > buildNum {
|
||||
row = workspaceBuild
|
||||
buildNum = workspaceBuild.BuildNumber
|
||||
}
|
||||
}
|
||||
if buildNum == 0 {
|
||||
if buildNum == -1 {
|
||||
return database.WorkspaceBuild{}, sql.ErrNoRows
|
||||
}
|
||||
return row, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetLatestWorkspaceBuilds(_ context.Context) ([]database.WorkspaceBuild, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
builds := make(map[uuid.UUID]database.WorkspaceBuild)
|
||||
buildNumbers := make(map[uuid.UUID]int32)
|
||||
for _, workspaceBuild := range q.workspaceBuilds {
|
||||
id := workspaceBuild.WorkspaceID
|
||||
if workspaceBuild.BuildNumber > buildNumbers[id] {
|
||||
builds[id] = workspaceBuild
|
||||
buildNumbers[id] = workspaceBuild.BuildNumber
|
||||
}
|
||||
}
|
||||
var returnBuilds []database.WorkspaceBuild
|
||||
for i, n := range buildNumbers {
|
||||
if n > 0 {
|
||||
b := builds[i]
|
||||
returnBuilds = append(returnBuilds, b)
|
||||
}
|
||||
}
|
||||
if len(returnBuilds) == 0 {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return returnBuilds, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetLatestWorkspaceBuildsByWorkspaceIDs(_ context.Context, ids []uuid.UUID) ([]database.WorkspaceBuild, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
@@ -852,7 +885,9 @@ func (q *fakeQuerier) UpdateTemplateMetaByID(_ context.Context, arg database.Upd
|
||||
continue
|
||||
}
|
||||
tpl.UpdatedAt = database.Now()
|
||||
tpl.Name = arg.Name
|
||||
tpl.Description = arg.Description
|
||||
tpl.Icon = arg.Icon
|
||||
tpl.MaxTtl = arg.MaxTtl
|
||||
tpl.MinAutostartInterval = arg.MinAutostartInterval
|
||||
q.templates[idx] = tpl
|
||||
@@ -895,8 +930,8 @@ func (q *fakeQuerier) GetTemplatesWithFilter(_ context.Context, arg database.Get
|
||||
}
|
||||
if len(templates) > 0 {
|
||||
slices.SortFunc(templates, func(i, j database.Template) bool {
|
||||
if !i.CreatedAt.Before(j.CreatedAt) {
|
||||
return false
|
||||
if i.Name != j.Name {
|
||||
return i.Name < j.Name
|
||||
}
|
||||
return i.ID.String() < j.ID.String()
|
||||
})
|
||||
@@ -1078,8 +1113,8 @@ func (q *fakeQuerier) GetTemplates(_ context.Context) ([]database.Template, erro
|
||||
|
||||
templates := slices.Clone(q.templates)
|
||||
slices.SortFunc(templates, func(i, j database.Template) bool {
|
||||
if !i.CreatedAt.Before(j.CreatedAt) {
|
||||
return false
|
||||
if i.Name != j.Name {
|
||||
return i.Name < j.Name
|
||||
}
|
||||
return i.ID.String() < j.ID.String()
|
||||
})
|
||||
@@ -1228,9 +1263,6 @@ func (q *fakeQuerier) GetWorkspaceAgentsByResourceIDs(_ context.Context, resourc
|
||||
workspaceAgents = append(workspaceAgents, agent)
|
||||
}
|
||||
}
|
||||
if len(workspaceAgents) == 0 {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return workspaceAgents, nil
|
||||
}
|
||||
|
||||
@@ -1312,9 +1344,6 @@ func (q *fakeQuerier) GetWorkspaceResourcesByJobID(_ context.Context, jobID uuid
|
||||
}
|
||||
resources = append(resources, resource)
|
||||
}
|
||||
if len(resources) == 0 {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
@@ -1331,6 +1360,54 @@ func (q *fakeQuerier) GetWorkspaceResourcesCreatedAfter(_ context.Context, after
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetWorkspaceResourceMetadataCreatedAfter(ctx context.Context, after time.Time) ([]database.WorkspaceResourceMetadatum, error) {
|
||||
resources, err := q.GetWorkspaceResourcesCreatedAfter(ctx, after)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resourceIDs := map[uuid.UUID]struct{}{}
|
||||
for _, resource := range resources {
|
||||
resourceIDs[resource.ID] = struct{}{}
|
||||
}
|
||||
metadata := make([]database.WorkspaceResourceMetadatum, 0)
|
||||
for _, m := range q.provisionerJobResourceMetadata {
|
||||
_, ok := resourceIDs[m.WorkspaceResourceID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
metadata = append(metadata, m)
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetWorkspaceResourceMetadataByResourceID(_ context.Context, id uuid.UUID) ([]database.WorkspaceResourceMetadatum, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
metadata := make([]database.WorkspaceResourceMetadatum, 0)
|
||||
for _, metadatum := range q.provisionerJobResourceMetadata {
|
||||
if metadatum.WorkspaceResourceID.String() == id.String() {
|
||||
metadata = append(metadata, metadatum)
|
||||
}
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetWorkspaceResourceMetadataByResourceIDs(_ context.Context, ids []uuid.UUID) ([]database.WorkspaceResourceMetadatum, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
metadata := make([]database.WorkspaceResourceMetadatum, 0)
|
||||
for _, metadatum := range q.provisionerJobResourceMetadata {
|
||||
for _, id := range ids {
|
||||
if metadatum.WorkspaceResourceID.String() == id.String() {
|
||||
metadata = append(metadata, metadatum)
|
||||
}
|
||||
}
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetProvisionerJobsByIDs(_ context.Context, ids []uuid.UUID) ([]database.ProvisionerJob, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
@@ -1397,20 +1474,16 @@ func (q *fakeQuerier) InsertAPIKey(_ context.Context, arg database.InsertAPIKeyP
|
||||
|
||||
//nolint:gosimple
|
||||
key := database.APIKey{
|
||||
ID: arg.ID,
|
||||
LifetimeSeconds: arg.LifetimeSeconds,
|
||||
HashedSecret: arg.HashedSecret,
|
||||
IPAddress: arg.IPAddress,
|
||||
UserID: arg.UserID,
|
||||
ExpiresAt: arg.ExpiresAt,
|
||||
CreatedAt: arg.CreatedAt,
|
||||
UpdatedAt: arg.UpdatedAt,
|
||||
LastUsed: arg.LastUsed,
|
||||
LoginType: arg.LoginType,
|
||||
OAuthAccessToken: arg.OAuthAccessToken,
|
||||
OAuthRefreshToken: arg.OAuthRefreshToken,
|
||||
OAuthIDToken: arg.OAuthIDToken,
|
||||
OAuthExpiry: arg.OAuthExpiry,
|
||||
ID: arg.ID,
|
||||
LifetimeSeconds: arg.LifetimeSeconds,
|
||||
HashedSecret: arg.HashedSecret,
|
||||
IPAddress: arg.IPAddress,
|
||||
UserID: arg.UserID,
|
||||
ExpiresAt: arg.ExpiresAt,
|
||||
CreatedAt: arg.CreatedAt,
|
||||
UpdatedAt: arg.UpdatedAt,
|
||||
LastUsed: arg.LastUsed,
|
||||
LoginType: arg.LoginType,
|
||||
}
|
||||
q.apiKeys = append(q.apiKeys, key)
|
||||
return key, nil
|
||||
@@ -1486,10 +1559,6 @@ func (q *fakeQuerier) InsertTemplate(_ context.Context, arg database.InsertTempl
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
// default values
|
||||
if arg.MaxTtl == 0 {
|
||||
arg.MaxTtl = int64(168 * time.Hour)
|
||||
}
|
||||
if arg.MinAutostartInterval == 0 {
|
||||
arg.MinAutostartInterval = int64(time.Hour)
|
||||
}
|
||||
@@ -1659,6 +1728,21 @@ func (q *fakeQuerier) InsertWorkspaceResource(_ context.Context, arg database.In
|
||||
return resource, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) InsertWorkspaceResourceMetadata(_ context.Context, arg database.InsertWorkspaceResourceMetadataParams) (database.WorkspaceResourceMetadatum, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
//nolint:gosimple
|
||||
metadatum := database.WorkspaceResourceMetadatum{
|
||||
WorkspaceResourceID: arg.WorkspaceResourceID,
|
||||
Key: arg.Key,
|
||||
Value: arg.Value,
|
||||
Sensitive: arg.Sensitive,
|
||||
}
|
||||
q.provisionerJobResourceMetadata = append(q.provisionerJobResourceMetadata, metadatum)
|
||||
return metadatum, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParams) (database.User, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
@@ -1672,6 +1756,7 @@ func (q *fakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam
|
||||
Username: arg.Username,
|
||||
Status: database.UserStatusActive,
|
||||
RBACRoles: arg.RBACRoles,
|
||||
LoginType: arg.LoginType,
|
||||
}
|
||||
q.users = append(q.users, user)
|
||||
return user, nil
|
||||
@@ -1827,9 +1912,6 @@ func (q *fakeQuerier) UpdateAPIKeyByID(_ context.Context, arg database.UpdateAPI
|
||||
apiKey.LastUsed = arg.LastUsed
|
||||
apiKey.ExpiresAt = arg.ExpiresAt
|
||||
apiKey.IPAddress = arg.IPAddress
|
||||
apiKey.OAuthAccessToken = arg.OAuthAccessToken
|
||||
apiKey.OAuthRefreshToken = arg.OAuthRefreshToken
|
||||
apiKey.OAuthExpiry = arg.OAuthExpiry
|
||||
q.apiKeys[index] = apiKey
|
||||
return nil
|
||||
}
|
||||
@@ -1999,6 +2081,32 @@ func (q *fakeQuerier) UpdateProvisionerJobWithCompleteByID(_ context.Context, ar
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) UpdateWorkspace(_ context.Context, arg database.UpdateWorkspaceParams) (database.Workspace, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for i, workspace := range q.workspaces {
|
||||
if workspace.Deleted || workspace.ID != arg.ID {
|
||||
continue
|
||||
}
|
||||
for _, other := range q.workspaces {
|
||||
if other.Deleted || other.ID == workspace.ID || workspace.OwnerID != other.OwnerID {
|
||||
continue
|
||||
}
|
||||
if other.Name == arg.Name {
|
||||
return database.Workspace{}, &pq.Error{Code: "23505", Message: "duplicate key value violates unique constraint"}
|
||||
}
|
||||
}
|
||||
|
||||
workspace.Name = arg.Name
|
||||
q.workspaces[i] = workspace
|
||||
|
||||
return workspace, nil
|
||||
}
|
||||
|
||||
return database.Workspace{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) UpdateWorkspaceAutostart(_ context.Context, arg database.UpdateWorkspaceAutostartParams) error {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
@@ -2188,3 +2296,119 @@ func (q *fakeQuerier) GetDeploymentID(_ context.Context) (string, error) {
|
||||
|
||||
return q.deploymentID, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) InsertLicense(
|
||||
_ context.Context, arg database.InsertLicenseParams) (database.License, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
l := database.License{
|
||||
ID: q.lastLicenseID + 1,
|
||||
UploadedAt: arg.UploadedAt,
|
||||
JWT: arg.JWT,
|
||||
Exp: arg.Exp,
|
||||
}
|
||||
q.lastLicenseID = l.ID
|
||||
q.licenses = append(q.licenses, l)
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetLicenses(_ context.Context) ([]database.License, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
results := append([]database.License{}, q.licenses...)
|
||||
sort.Slice(results, func(i, j int) bool { return results[i].ID < results[j].ID })
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) DeleteLicense(_ context.Context, id int32) (int32, error) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for index, l := range q.licenses {
|
||||
if l.ID == id {
|
||||
q.licenses[index] = q.licenses[len(q.licenses)-1]
|
||||
q.licenses = q.licenses[:len(q.licenses)-1]
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
return 0, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetUserLinkByLinkedID(_ context.Context, id string) (database.UserLink, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, link := range q.userLinks {
|
||||
if link.LinkedID == id {
|
||||
return link, nil
|
||||
}
|
||||
}
|
||||
return database.UserLink{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) GetUserLinkByUserIDLoginType(_ context.Context, params database.GetUserLinkByUserIDLoginTypeParams) (database.UserLink, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, link := range q.userLinks {
|
||||
if link.UserID == params.UserID && link.LoginType == params.LoginType {
|
||||
return link, nil
|
||||
}
|
||||
}
|
||||
return database.UserLink{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) InsertUserLink(_ context.Context, args database.InsertUserLinkParams) (database.UserLink, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
//nolint:gosimple
|
||||
link := database.UserLink{
|
||||
UserID: args.UserID,
|
||||
LoginType: args.LoginType,
|
||||
LinkedID: args.LinkedID,
|
||||
OAuthAccessToken: args.OAuthAccessToken,
|
||||
OAuthRefreshToken: args.OAuthRefreshToken,
|
||||
OAuthExpiry: args.OAuthExpiry,
|
||||
}
|
||||
|
||||
q.userLinks = append(q.userLinks, link)
|
||||
|
||||
return link, nil
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) UpdateUserLinkedID(_ context.Context, params database.UpdateUserLinkedIDParams) (database.UserLink, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for i, link := range q.userLinks {
|
||||
if link.UserID == params.UserID && link.LoginType == params.LoginType {
|
||||
link.LinkedID = params.LinkedID
|
||||
|
||||
q.userLinks[i] = link
|
||||
return link, nil
|
||||
}
|
||||
}
|
||||
|
||||
return database.UserLink{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *fakeQuerier) UpdateUserLink(_ context.Context, params database.UpdateUserLinkParams) (database.UserLink, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for i, link := range q.userLinks {
|
||||
if link.UserID == params.UserID && link.LoginType == params.LoginType {
|
||||
link.OAuthAccessToken = params.OAuthAccessToken
|
||||
link.OAuthRefreshToken = params.OAuthRefreshToken
|
||||
link.OAuthExpiry = params.OAuthExpiry
|
||||
|
||||
q.userLinks[i] = link
|
||||
return link, nil
|
||||
}
|
||||
}
|
||||
|
||||
return database.UserLink{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ func TestNestedInTx(t *testing.T) {
|
||||
CreatedAt: database.Now(),
|
||||
UpdatedAt: database.Now(),
|
||||
RBACRoles: []string{},
|
||||
LoginType: database.LoginTypeGithub,
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
Generated
+40
-8
@@ -96,10 +96,6 @@ CREATE TABLE api_keys (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
login_type login_type NOT NULL,
|
||||
oauth_access_token text DEFAULT ''::text NOT NULL,
|
||||
oauth_refresh_token text DEFAULT ''::text NOT NULL,
|
||||
oauth_id_token text DEFAULT ''::text NOT NULL,
|
||||
oauth_expiry timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
|
||||
lifetime_seconds bigint DEFAULT 86400 NOT NULL,
|
||||
ip_address inet DEFAULT '0.0.0.0'::inet NOT NULL
|
||||
);
|
||||
@@ -137,10 +133,13 @@ CREATE TABLE gitsshkeys (
|
||||
|
||||
CREATE TABLE licenses (
|
||||
id integer NOT NULL,
|
||||
license jsonb NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL
|
||||
uploaded_at timestamp with time zone NOT NULL,
|
||||
jwt text NOT NULL,
|
||||
exp timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
COMMENT ON COLUMN licenses.exp IS 'exp tracks the claim of the same name in the JWT, and we include it here so that we can easily query for licenses that have not yet expired.';
|
||||
|
||||
CREATE SEQUENCE licenses_id_seq
|
||||
AS integer
|
||||
START WITH 1
|
||||
@@ -264,7 +263,17 @@ CREATE TABLE templates (
|
||||
description character varying(128) DEFAULT ''::character varying NOT NULL,
|
||||
max_ttl bigint DEFAULT '604800000000000'::bigint NOT NULL,
|
||||
min_autostart_interval bigint DEFAULT '3600000000000'::bigint NOT NULL,
|
||||
created_by uuid NOT NULL
|
||||
created_by uuid NOT NULL,
|
||||
icon character varying(256) DEFAULT ''::character varying NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE user_links (
|
||||
user_id uuid NOT NULL,
|
||||
login_type login_type NOT NULL,
|
||||
linked_id text DEFAULT ''::text NOT NULL,
|
||||
oauth_access_token text DEFAULT ''::text NOT NULL,
|
||||
oauth_refresh_token text DEFAULT ''::text NOT NULL,
|
||||
oauth_expiry timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE users (
|
||||
@@ -275,7 +284,8 @@ CREATE TABLE users (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
status user_status DEFAULT 'active'::public.user_status NOT NULL,
|
||||
rbac_roles text[] DEFAULT '{}'::text[] NOT NULL
|
||||
rbac_roles text[] DEFAULT '{}'::text[] NOT NULL,
|
||||
login_type login_type DEFAULT 'password'::public.login_type NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE workspace_agents (
|
||||
@@ -328,6 +338,13 @@ CREATE TABLE workspace_builds (
|
||||
reason build_reason DEFAULT 'initiator'::public.build_reason NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE workspace_resource_metadata (
|
||||
workspace_resource_id uuid NOT NULL,
|
||||
key character varying(1024) NOT NULL,
|
||||
value character varying(65536),
|
||||
sensitive boolean NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE workspace_resources (
|
||||
id uuid NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
@@ -364,6 +381,9 @@ ALTER TABLE ONLY files
|
||||
ALTER TABLE ONLY gitsshkeys
|
||||
ADD CONSTRAINT gitsshkeys_pkey PRIMARY KEY (user_id);
|
||||
|
||||
ALTER TABLE ONLY licenses
|
||||
ADD CONSTRAINT licenses_jwt_key UNIQUE (jwt);
|
||||
|
||||
ALTER TABLE ONLY licenses
|
||||
ADD CONSTRAINT licenses_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -409,6 +429,9 @@ ALTER TABLE ONLY template_versions
|
||||
ALTER TABLE ONLY templates
|
||||
ADD CONSTRAINT templates_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY user_links
|
||||
ADD CONSTRAINT user_links_pkey PRIMARY KEY (user_id, login_type);
|
||||
|
||||
ALTER TABLE ONLY users
|
||||
ADD CONSTRAINT users_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -433,6 +456,9 @@ ALTER TABLE ONLY workspace_builds
|
||||
ALTER TABLE ONLY workspace_builds
|
||||
ADD CONSTRAINT workspace_builds_workspace_id_name_key UNIQUE (workspace_id, name);
|
||||
|
||||
ALTER TABLE ONLY workspace_resource_metadata
|
||||
ADD CONSTRAINT workspace_resource_metadata_pkey PRIMARY KEY (workspace_resource_id, key);
|
||||
|
||||
ALTER TABLE ONLY workspace_resources
|
||||
ADD CONSTRAINT workspace_resources_pkey PRIMARY KEY (id);
|
||||
|
||||
@@ -503,6 +529,9 @@ ALTER TABLE ONLY templates
|
||||
ALTER TABLE ONLY templates
|
||||
ADD CONSTRAINT templates_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY user_links
|
||||
ADD CONSTRAINT user_links_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_agents
|
||||
ADD CONSTRAINT workspace_agents_resource_id_fkey FOREIGN KEY (resource_id) REFERENCES workspace_resources(id) ON DELETE CASCADE;
|
||||
|
||||
@@ -518,6 +547,9 @@ ALTER TABLE ONLY workspace_builds
|
||||
ALTER TABLE ONLY workspace_builds
|
||||
ADD CONSTRAINT workspace_builds_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_resource_metadata
|
||||
ADD CONSTRAINT workspace_resource_metadata_workspace_resource_id_fkey FOREIGN KEY (workspace_resource_id) REFERENCES workspace_resources(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY workspace_resources
|
||||
ADD CONSTRAINT workspace_resources_job_id_fkey FOREIGN KEY (job_id) REFERENCES provisioner_jobs(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// IsUniqueViolation checks if the error is due to a unique violation.
|
||||
// If one or more specific unique constraints are given as arguments,
|
||||
// the error must be caused by one of them. If no constraints are given,
|
||||
// this function returns true for any unique violation.
|
||||
func IsUniqueViolation(err error, uniqueConstraints ...UniqueConstraint) bool {
|
||||
var pqErr *pq.Error
|
||||
if errors.As(err, &pqErr) {
|
||||
if pqErr.Code.Name() == "unique_violation" {
|
||||
if len(uniqueConstraints) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, uc := range uniqueConstraints {
|
||||
if pqErr.Constraint == string(uc) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -41,7 +41,6 @@ func main() {
|
||||
connection,
|
||||
"--no-privileges",
|
||||
"--no-owner",
|
||||
"--no-comments",
|
||||
|
||||
// We never want to manually generate
|
||||
// queries executing against this table.
|
||||
@@ -89,7 +88,7 @@ func main() {
|
||||
if !ok {
|
||||
panic("couldn't get caller path")
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(mainPath, "..", "..", "dump.sql"), []byte(dump), 0600)
|
||||
err = os.WriteFile(filepath.Join(mainPath, "..", "..", "..", "dump.sql"), []byte(dump), 0o600)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const header = `// Code generated by gen/enum. DO NOT EDIT.
|
||||
package database
|
||||
`
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func run() error {
|
||||
dump, err := os.Open("dump.sql")
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "error: %s must be run in the database directory with dump.sql present\n", os.Args[0])
|
||||
return err
|
||||
}
|
||||
defer dump.Close()
|
||||
|
||||
var uniqueConstraints []string
|
||||
|
||||
s := bufio.NewScanner(dump)
|
||||
query := ""
|
||||
for s.Scan() {
|
||||
line := strings.TrimSpace(s.Text())
|
||||
switch {
|
||||
case strings.HasPrefix(line, "--"):
|
||||
case line == "":
|
||||
case strings.HasSuffix(line, ";"):
|
||||
query += line
|
||||
if isUniqueConstraint(query) {
|
||||
uniqueConstraints = append(uniqueConstraints, query)
|
||||
}
|
||||
query = ""
|
||||
default:
|
||||
query += line + " "
|
||||
}
|
||||
}
|
||||
if err = s.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeContents("unique_constraint.go", uniqueConstraints, generateUniqueConstraints)
|
||||
}
|
||||
|
||||
// isUniqueConstraint reports whether the given SQL statement declares a
// unique constraint (a UNIQUE table constraint or CREATE UNIQUE INDEX).
func isUniqueConstraint(query string) bool {
	const marker = "UNIQUE"
	return strings.Contains(query, marker)
}
|
||||
|
||||
func generateUniqueConstraints(queries []string) ([]byte, error) {
|
||||
s := &bytes.Buffer{}
|
||||
|
||||
_, _ = fmt.Fprint(s, header)
|
||||
_, _ = fmt.Fprint(s, `
|
||||
// UniqueConstraint represents a named unique constraint on a table.
|
||||
type UniqueConstraint string
|
||||
|
||||
// UniqueConstraint enums.
|
||||
const (
|
||||
`)
|
||||
for _, query := range queries {
|
||||
name := ""
|
||||
switch {
|
||||
case strings.Contains(query, "ALTER TABLE") && strings.Contains(query, "ADD CONSTRAINT"):
|
||||
name = strings.Split(query, " ")[6]
|
||||
case strings.Contains(query, "CREATE UNIQUE INDEX"):
|
||||
name = strings.Split(query, " ")[3]
|
||||
default:
|
||||
return nil, xerrors.Errorf("unknown unique constraint format: %s", query)
|
||||
}
|
||||
_, _ = fmt.Fprintf(s, "\tUnique%s UniqueConstraint = %q // %s\n", nameFromSnakeCase(name), name, query)
|
||||
}
|
||||
_, _ = fmt.Fprint(s, ")\n")
|
||||
|
||||
return s.Bytes(), nil
|
||||
}
|
||||
|
||||
// writeContents renders arg through fn, writes the result to dest, and
// then runs goimports over the generated file to normalize formatting.
func writeContents[T any](dest string, arg T, fn func(T) ([]byte, error)) error {
	contents, err := fn(arg)
	if err != nil {
		return err
	}
	if err := os.WriteFile(dest, contents, 0o600); err != nil {
		return err
	}
	// Tidy the imports/formatting of the generated source.
	return exec.Command("goimports", "-w", dest).Run()
}
|
||||
|
||||
// nameFromSnakeCase converts a snake_case SQL identifier (e.g. a
// constraint name) into a Go-style CamelCase name, expanding common
// initialisms ("id" -> "ID", "jwt" -> "JWT", "idx" -> "Index").
func nameFromSnakeCase(s string) string {
	var sb strings.Builder
	for _, part := range strings.Split(s, "_") {
		switch part {
		case "id":
			sb.WriteString("ID")
		case "ids":
			sb.WriteString("IDs")
		case "jwt":
			sb.WriteString("JWT")
		case "idx":
			sb.WriteString("Index")
		case "":
			// Doubled or leading/trailing underscores produce empty
			// segments; skip them (strings.Title also yielded "").
		default:
			// strings.Title is deprecated since Go 1.18. SQL
			// identifiers here are ASCII, so capitalizing the first
			// byte is an exact replacement.
			sb.WriteString(strings.ToUpper(part[:1]))
			sb.WriteString(part[1:])
		}
	}
	return sb.String()
}
|
||||
@@ -13,6 +13,8 @@ SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
|
||||
(
|
||||
cd "$SCRIPT_DIR"
|
||||
|
||||
# Dump the updated schema.
|
||||
go run gen/dump/main.go
|
||||
# The logic below depends on the exact version being correct :(
|
||||
go run github.com/kyleconroy/sqlc/cmd/sqlc@v1.13.0 generate
|
||||
|
||||
@@ -47,4 +49,7 @@ SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
|
||||
# suggestions.
|
||||
go mod download
|
||||
goimports -w queries.sql.go
|
||||
|
||||
# Generate enums (e.g. unique constraints).
|
||||
go run gen/enum/main.go
|
||||
)
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
-- Down migration: remove the workspace resource metadata table.
DROP TABLE IF EXISTS workspace_resource_metadata;
|
||||
@@ -0,0 +1,8 @@
|
||||
-- Key/value metadata attached to a workspace resource.
-- "sensitive" flags values that should be redacted on display.
CREATE TABLE IF NOT EXISTS workspace_resource_metadata (
    workspace_resource_id uuid NOT NULL,
    key varchar(1024) NOT NULL,
    value varchar(65536),
    sensitive boolean NOT NULL,
    -- A resource may carry many entries, at most one per key.
    PRIMARY KEY (workspace_resource_id, key),
    -- Metadata is deleted along with its owning resource.
    FOREIGN KEY (workspace_resource_id) REFERENCES workspace_resources (id) ON DELETE CASCADE
);
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user