Compare commits
185 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 6f68315f3b | |||
| 3716afac46 | |||
| 0f63510d0d | |||
| 003dc5cc03 | |||
| 190cd1c713 | |||
| 0ef85147cd | |||
| 0f8251be41 | |||
| 10c958bba1 | |||
| 043f4f5327 | |||
| 13e5c51c30 | |||
| 9596f236c1 | |||
| 0f414a00d3 | |||
| a74273f1fd | |||
| c90be9b0c1 | |||
| 851df91991 | |||
| 628750232f | |||
| 4672849d05 | |||
| d2a22c538b | |||
| 6bc93520c4 | |||
| cd38e297b6 | |||
| ef7fcf3930 | |||
| 49afab12d5 | |||
| 4b5c45d6df | |||
| e65eb0321c | |||
| 6dbfe6f7ae | |||
| 15d74a11a0 | |||
| f3ea740b27 | |||
| b96ac677f1 | |||
| 54fe082551 | |||
| 7667d64686 | |||
| f24cb5cc96 | |||
| c597c9260d | |||
| 839918c5e7 | |||
| 8c15192433 | |||
| b36d979a60 | |||
| f3c76ce244 | |||
| 63fe2305f8 | |||
| 47f2c7d683 | |||
| 0afff43f9d | |||
| 499769187b | |||
| 88d7181a47 | |||
| 83f9ea17b4 | |||
| 93eef7b542 | |||
| fb6b954222 | |||
| ded612d3ec | |||
| 6914862903 | |||
| c8eacc6df7 | |||
| af125c3795 | |||
| cb6a47227f | |||
| 4bd7fe8506 | |||
| 53e5746636 | |||
| a4d785dec5 | |||
| d4adfa3902 | |||
| 99e103e790 | |||
| 4cc26be5ec | |||
| 5710a98714 | |||
| b0084e2229 | |||
| d52bc91e48 | |||
| 337ee3544b | |||
| aeb4040958 | |||
| c818b4ddd4 | |||
| 046c1c4228 | |||
| 3e5cfa9e45 | |||
| fbec45b807 | |||
| cc944209ae | |||
| 82e6070c7a | |||
| 3514ca3476 | |||
| e8c59a1d9d | |||
| d7800a43e9 | |||
| 9f4f88f38c | |||
| a359879af5 | |||
| fa733318e0 | |||
| 6960d194ae | |||
| 9c8c6a952d | |||
| d9f419308a | |||
| b6d35edebd | |||
| 03f05e25f6 | |||
| cca4519420 | |||
| 2bef1752f1 | |||
| 40baa5bc72 | |||
| cf8be4eac5 | |||
| 0b2ba96065 | |||
| 6f9b3c1592 | |||
| f8f3d8967e | |||
| 4446d61fcd | |||
| 1c3dc8392e | |||
| fa59b30cfb | |||
| f007c90a30 | |||
| 10327fb3a9 | |||
| 755afa31cf | |||
| 422e044859 | |||
| c3ef7dc33b | |||
| d0f36dc6ba | |||
| cba6e93176 | |||
| bec6a26d0e | |||
| 8c4d726cf6 | |||
| fc3b2ff06c | |||
| 0613797934 | |||
| 363a016281 | |||
| 979430d635 | |||
| 7142cbb9e6 | |||
| 2c150d03f6 | |||
| 9b9496cf4d | |||
| a62e69d34a | |||
| 91a74f0ead | |||
| 4db8fa661e | |||
| 95a7c0c4f0 | |||
| db2d0596d4 | |||
| f2a96ac984 | |||
| 82cb6ef7ec | |||
| d15f16fa2e | |||
| 7b09d98238 | |||
| 83ccdaa755 | |||
| f619500833 | |||
| 8563b372e8 | |||
| 4fc047954e | |||
| 6f1951e1c8 | |||
| 86b9c97e8e | |||
| e978d4d9ac | |||
| c90e6d7b47 | |||
| 84fdfd2a18 | |||
| 712a1b50d8 | |||
| ccc664de37 | |||
| f1feb40e17 | |||
| 48f29a1995 | |||
| 6f9b1a39f4 | |||
| 60218c4c78 | |||
| 76722a7db5 | |||
| 4c7132f08b | |||
| 59a80d70dc | |||
| 9715ae5932 | |||
| 8af8c77e2a | |||
| 0338250d86 | |||
| 73402fc2f7 | |||
| ba4186dacc | |||
| 0b9ed57c10 | |||
| c648c548d8 | |||
| 21942afef3 | |||
| aaa5174bef | |||
| 591385f2ca | |||
| 27b8f201a4 | |||
| abbcffe181 | |||
| 9a47ea1279 | |||
| 6019d0ba96 | |||
| d6c4d47229 | |||
| 2e05329111 | |||
| 238e9956f4 | |||
| d79a7adf99 | |||
| f50e1d5a9a | |||
| 2c13797350 | |||
| d0feb70811 | |||
| b55a7a8b78 | |||
| 8c0565177e | |||
| c6076d2d0d | |||
| e09ad1ddc1 | |||
| 46becc7201 | |||
| 373b36c3c9 | |||
| 3b53f5ab47 | |||
| ff785588fe | |||
| fab196043e | |||
| 49feb12a7f | |||
| 89e6afbc5e | |||
| 58428aafce | |||
| 70a694ed4c | |||
| 097f739492 | |||
| 0ad5f6067d | |||
| 173dc0e35f | |||
| a77a9ab0a6 | |||
| 203f48af56 | |||
| b80d99550a | |||
| 4e0cb60eeb | |||
| dfeafa8f5a | |||
| efbd6257e4 | |||
| f9b660e573 | |||
| fce14fb9ad | |||
| 33beb9bd70 | |||
| 96642382b3 | |||
| 25c83cf0b1 | |||
| e398309a8f | |||
| e164b1e71c | |||
| 49a2880abc | |||
| 8acc7f2070 | |||
| 42336eef4a | |||
| dda9c56098 | |||
| e0351124b2 |
@@ -1,13 +1,13 @@
|
||||
{
|
||||
"name": "Development environments on your infrastructure",
|
||||
"image": "codercom/oss-dogfood:latest",
|
||||
"name": "Development environments on your infrastructure",
|
||||
"image": "codercom/oss-dogfood:latest",
|
||||
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
"runArgs": ["--cap-add=SYS_PTRACE"]
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
"runArgs": ["--cap-add=SYS_PTRACE"]
|
||||
}
|
||||
|
||||
+1
-1
@@ -7,7 +7,7 @@ trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
indent_style = tab
|
||||
|
||||
[*.{md,json,yaml,yml,tf,tfvars,nix}]
|
||||
[*.{yaml,yml,tf,tfvars,nix}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
|
||||
# chore: format code with semicolons when using prettier (#9555)
|
||||
988c9af0153561397686c119da9d1336d2433fdd
|
||||
# chore: use tabs for prettier and biome (#14283)
|
||||
95a7c0c4f087744a22c2e88dd3c5d30024d5fb02
|
||||
|
||||
+2
-2
@@ -1,7 +1,7 @@
|
||||
# Generated files
|
||||
coderd/apidoc/docs.go linguist-generated=true
|
||||
docs/api/*.md linguist-generated=true
|
||||
docs/cli/*.md linguist-generated=true
|
||||
docs/reference/api/*.md linguist-generated=true
|
||||
docs/reference/cli/*.md linguist-generated=true
|
||||
coderd/apidoc/swagger.json linguist-generated=true
|
||||
coderd/database/dump.sql linguist-generated=true
|
||||
peerbroker/proto/*.go linguist-generated=true
|
||||
|
||||
+8
-19
@@ -86,37 +86,26 @@ updates:
|
||||
- "@mui*"
|
||||
react:
|
||||
patterns:
|
||||
- "react*"
|
||||
- "@types/react*"
|
||||
- "react"
|
||||
- "react-dom"
|
||||
- "@types/react"
|
||||
- "@types/react-dom"
|
||||
emotion:
|
||||
patterns:
|
||||
- "@emotion*"
|
||||
eslint:
|
||||
patterns:
|
||||
- "eslint*"
|
||||
- "@typescript-eslint*"
|
||||
exclude-patterns:
|
||||
- "jest-runner-eslint"
|
||||
jest:
|
||||
patterns:
|
||||
- "jest*"
|
||||
- "jest"
|
||||
- "@types/jest"
|
||||
vite:
|
||||
patterns:
|
||||
- "vite*"
|
||||
- "@vitejs/plugin-react"
|
||||
ignore:
|
||||
# Ignore patch updates for all dependencies
|
||||
# Ignore major version updates to avoid breaking changes
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
# Ignore major updates to Node.js types, because they need to
|
||||
# correspond to the Node.js engine version
|
||||
- dependency-name: "@types/node"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
# Ignore @storybook updates, run `pnpm dlx storybook@latest upgrade` to upgrade manually
|
||||
- dependency-name: "*storybook*" # matches @storybook/* and storybook*
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
- version-update:semver-minor
|
||||
- version-update:semver-patch
|
||||
open-pull-requests-limit: 15
|
||||
|
||||
@@ -168,7 +168,7 @@ jobs:
|
||||
|
||||
- name: Get golangci-lint cache dir
|
||||
run: |
|
||||
linter_ver=$(egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/Dockerfile | cut -d '=' -f 2)
|
||||
linter_ver=$(egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/contents/Dockerfile | cut -d '=' -f 2)
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$linter_ver
|
||||
dir=$(golangci-lint cache status | awk '/Dir/ { print $2 }')
|
||||
echo "LINT_CACHE_DIR=$dir" >> $GITHUB_ENV
|
||||
@@ -184,7 +184,7 @@ jobs:
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@v1.23.5
|
||||
uses: crate-ci/typos@v1.23.6
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ jobs:
|
||||
steps:
|
||||
- name: cla
|
||||
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
|
||||
uses: contributor-assistant/github-action@v2.4.0
|
||||
uses: contributor-assistant/github-action@v2.5.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# the below token should have repo scope and must be manually added by you in the repository's secret
|
||||
|
||||
@@ -17,6 +17,10 @@ on:
|
||||
- "flake.nix"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
# Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
build_image:
|
||||
if: github.actor != 'dependabot[bot]' # Skip Dependabot PRs
|
||||
@@ -56,7 +60,7 @@ jobs:
|
||||
project: b4q6ltmpzh
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
buildx-fallback: true
|
||||
context: "{{defaultContext}}:dogfood"
|
||||
context: "{{defaultContext}}:dogfood/contents"
|
||||
pull: true
|
||||
save: true
|
||||
push: ${{ github.ref == 'refs/heads/main' }}
|
||||
@@ -69,7 +73,7 @@ jobs:
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
buildx-fallback: true
|
||||
context: "."
|
||||
file: "dogfood/Dockerfile.nix"
|
||||
file: "dogfood/contents/Dockerfile.nix"
|
||||
pull: true
|
||||
save: true
|
||||
push: ${{ github.ref == 'refs/heads/main' }}
|
||||
@@ -85,11 +89,20 @@ jobs:
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Terraform init and validate
|
||||
run: |
|
||||
cd dogfood
|
||||
terraform init -upgrade
|
||||
terraform validate
|
||||
cd contents
|
||||
terraform init -upgrade
|
||||
terraform validate
|
||||
|
||||
- name: Get short commit SHA
|
||||
if: github.ref == 'refs/heads/main'
|
||||
@@ -101,22 +114,18 @@ jobs:
|
||||
id: message
|
||||
run: echo "pr_title=$(git log --format=%s -n 1 ${{ github.sha }})" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Get latest Coder binary from the server"
|
||||
if: github.ref == 'refs/heads/main'
|
||||
run: |
|
||||
curl -fsSL "https://dev.coder.com/bin/coder-linux-amd64" -o "./coder"
|
||||
chmod +x "./coder"
|
||||
|
||||
- name: "Push template"
|
||||
if: github.ref == 'refs/heads/main'
|
||||
run: |
|
||||
./coder templates push $CODER_TEMPLATE_NAME --directory $CODER_TEMPLATE_DIR --yes --name=$CODER_TEMPLATE_VERSION --message="$CODER_TEMPLATE_MESSAGE"
|
||||
cd dogfood
|
||||
terraform apply -auto-approve
|
||||
env:
|
||||
# Consumed by Coder CLI
|
||||
# Consumed by coderd provider
|
||||
CODER_URL: https://dev.coder.com
|
||||
CODER_SESSION_TOKEN: ${{ secrets.CODER_SESSION_TOKEN }}
|
||||
# Template source & details
|
||||
CODER_TEMPLATE_NAME: ${{ secrets.CODER_TEMPLATE_NAME }}
|
||||
CODER_TEMPLATE_VERSION: ${{ steps.vars.outputs.sha_short }}
|
||||
CODER_TEMPLATE_DIR: ./dogfood
|
||||
CODER_TEMPLATE_MESSAGE: ${{ steps.message.outputs.pr_title }}
|
||||
TF_VAR_CODER_TEMPLATE_NAME: ${{ secrets.CODER_TEMPLATE_NAME }}
|
||||
TF_VAR_CODER_TEMPLATE_VERSION: ${{ steps.vars.outputs.sha_short }}
|
||||
TF_VAR_CODER_TEMPLATE_DIR: ./contents
|
||||
TF_VAR_CODER_TEMPLATE_MESSAGE: ${{ steps.message.outputs.pr_title }}
|
||||
TF_LOG: info
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
# Workflow for serving the webapp locally & running Meticulous tests against it.
|
||||
|
||||
name: Meticulous
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- "site/**"
|
||||
pull_request:
|
||||
paths:
|
||||
- "site/**"
|
||||
# Meticulous needs the workflow to be triggered on workflow_dispatch events,
|
||||
# so that Meticulous can run the workflow on the base commit to compare
|
||||
# against if an existing workflow hasn't run.
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
actions: write
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
statuses: read
|
||||
|
||||
jobs:
|
||||
meticulous:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "Checkout Repository"
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
- name: Build
|
||||
working-directory: ./site
|
||||
run: pnpm build
|
||||
- name: Serve
|
||||
working-directory: ./site
|
||||
run: |
|
||||
pnpm vite preview &
|
||||
sleep 5
|
||||
- name: Run Meticulous tests
|
||||
uses: alwaysmeticulous/report-diffs-action/cloud-compute@v1
|
||||
with:
|
||||
api-token: ${{ secrets.METICULOUS_API_TOKEN }}
|
||||
app-url: "http://127.0.0.1:4173/"
|
||||
@@ -1,26 +1,26 @@
|
||||
{
|
||||
"ignorePatterns": [
|
||||
{
|
||||
"pattern": "://localhost"
|
||||
},
|
||||
{
|
||||
"pattern": "://.*.?example\\.com"
|
||||
},
|
||||
{
|
||||
"pattern": "developer.github.com"
|
||||
},
|
||||
{
|
||||
"pattern": "docs.github.com"
|
||||
},
|
||||
{
|
||||
"pattern": "support.google.com"
|
||||
},
|
||||
{
|
||||
"pattern": "tailscale.com"
|
||||
},
|
||||
{
|
||||
"pattern": "wireguard.com"
|
||||
}
|
||||
],
|
||||
"aliveStatusCodes": [200, 0]
|
||||
"ignorePatterns": [
|
||||
{
|
||||
"pattern": "://localhost"
|
||||
},
|
||||
{
|
||||
"pattern": "://.*.?example\\.com"
|
||||
},
|
||||
{
|
||||
"pattern": "developer.github.com"
|
||||
},
|
||||
{
|
||||
"pattern": "docs.github.com"
|
||||
},
|
||||
{
|
||||
"pattern": "support.google.com"
|
||||
},
|
||||
{
|
||||
"pattern": "tailscale.com"
|
||||
},
|
||||
{
|
||||
"pattern": "wireguard.com"
|
||||
}
|
||||
],
|
||||
"aliveStatusCodes": [200, 0]
|
||||
}
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
name: release-validation
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
|
||||
jobs:
|
||||
network-performance:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Run Schmoder CI
|
||||
uses: benc-uk/workflow-dispatch@v1.2.4
|
||||
with:
|
||||
workflow: ci.yaml
|
||||
repo: coder/schmoder
|
||||
inputs: '{ "num_releases": "3", "commit": "${{ github.sha }}" }'
|
||||
token: ${{ secrets.CDRCI_SCHMODER_ACTIONS_TOKEN }}
|
||||
ref: main
|
||||
+6
-12
@@ -79,19 +79,13 @@ result
|
||||
# by Prettier.
|
||||
helm/**/templates/*.yaml
|
||||
|
||||
# Terraform state files used in tests, these are automatically generated.
|
||||
# Example: provisioner/terraform/testdata/instance-id/instance-id.tfstate.json
|
||||
**/testdata/**/*.tf*.json
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
testdata/
|
||||
|
||||
# Ignore generated files
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
# Ignore generated JSON (e.g. examples/examples.gen.json).
|
||||
**/*.gen.json
|
||||
|
||||
# Everything in site/ is formatted by Biome. For the rest of the repo though, we
|
||||
# need broader language support.
|
||||
site/
|
||||
|
||||
+6
-12
@@ -2,19 +2,13 @@
|
||||
# by Prettier.
|
||||
helm/**/templates/*.yaml
|
||||
|
||||
# Terraform state files used in tests, these are automatically generated.
|
||||
# Example: provisioner/terraform/testdata/instance-id/instance-id.tfstate.json
|
||||
**/testdata/**/*.tf*.json
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
testdata/
|
||||
|
||||
# Ignore generated files
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
# Ignore generated JSON (e.g. examples/examples.gen.json).
|
||||
**/*.gen.json
|
||||
|
||||
# Everything in site/ is formatted by Biome. For the rest of the repo though, we
|
||||
# need broader language support.
|
||||
site/
|
||||
|
||||
+3
-3
@@ -4,13 +4,13 @@
|
||||
printWidth: 80
|
||||
proseWrap: always
|
||||
trailingComma: all
|
||||
useTabs: false
|
||||
useTabs: true
|
||||
tabWidth: 2
|
||||
overrides:
|
||||
- files:
|
||||
- README.md
|
||||
- docs/api/**/*.md
|
||||
- docs/cli/**/*.md
|
||||
- docs/reference/api/**/*.md
|
||||
- docs/reference/cli/**/*.md
|
||||
- docs/changelogs/*.md
|
||||
- .github/**/*.{yaml,yml,toml}
|
||||
- scripts/**/*.{yaml,yml,toml}
|
||||
|
||||
Vendored
+13
-13
@@ -1,15 +1,15 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"github.vscode-codeql",
|
||||
"golang.go",
|
||||
"hashicorp.terraform",
|
||||
"esbenp.prettier-vscode",
|
||||
"foxundermoon.shell-format",
|
||||
"emeraldwalk.runonsave",
|
||||
"zxh404.vscode-proto3",
|
||||
"redhat.vscode-yaml",
|
||||
"streetsidesoftware.code-spell-checker",
|
||||
"dbaeumer.vscode-eslint",
|
||||
"EditorConfig.EditorConfig"
|
||||
]
|
||||
"recommendations": [
|
||||
"github.vscode-codeql",
|
||||
"golang.go",
|
||||
"hashicorp.terraform",
|
||||
"esbenp.prettier-vscode",
|
||||
"foxundermoon.shell-format",
|
||||
"emeraldwalk.runonsave",
|
||||
"zxh404.vscode-proto3",
|
||||
"redhat.vscode-yaml",
|
||||
"streetsidesoftware.code-spell-checker",
|
||||
"EditorConfig.EditorConfig",
|
||||
"biomejs.biome"
|
||||
]
|
||||
}
|
||||
|
||||
Vendored
+236
-225
@@ -1,227 +1,238 @@
|
||||
{
|
||||
"cSpell.words": [
|
||||
"afero",
|
||||
"agentsdk",
|
||||
"apps",
|
||||
"ASKPASS",
|
||||
"authcheck",
|
||||
"autostop",
|
||||
"awsidentity",
|
||||
"bodyclose",
|
||||
"buildinfo",
|
||||
"buildname",
|
||||
"circbuf",
|
||||
"cliflag",
|
||||
"cliui",
|
||||
"codecov",
|
||||
"coderd",
|
||||
"coderdenttest",
|
||||
"coderdtest",
|
||||
"codersdk",
|
||||
"contravariance",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"dbgen",
|
||||
"dbmem",
|
||||
"dbtype",
|
||||
"DERP",
|
||||
"derphttp",
|
||||
"derpmap",
|
||||
"devel",
|
||||
"devtunnel",
|
||||
"dflags",
|
||||
"drpc",
|
||||
"drpcconn",
|
||||
"drpcmux",
|
||||
"drpcserver",
|
||||
"Dsts",
|
||||
"embeddedpostgres",
|
||||
"enablements",
|
||||
"enterprisemeta",
|
||||
"errgroup",
|
||||
"eventsourcemock",
|
||||
"externalauth",
|
||||
"Failf",
|
||||
"fatih",
|
||||
"Formik",
|
||||
"gitauth",
|
||||
"gitsshkey",
|
||||
"goarch",
|
||||
"gographviz",
|
||||
"goleak",
|
||||
"gonet",
|
||||
"gossh",
|
||||
"gsyslog",
|
||||
"GTTY",
|
||||
"hashicorp",
|
||||
"hclsyntax",
|
||||
"httpapi",
|
||||
"httpmw",
|
||||
"idtoken",
|
||||
"Iflag",
|
||||
"incpatch",
|
||||
"initialisms",
|
||||
"ipnstate",
|
||||
"isatty",
|
||||
"Jobf",
|
||||
"Keygen",
|
||||
"kirsle",
|
||||
"Kubernetes",
|
||||
"ldflags",
|
||||
"magicsock",
|
||||
"manifoldco",
|
||||
"mapstructure",
|
||||
"mattn",
|
||||
"mitchellh",
|
||||
"moby",
|
||||
"namesgenerator",
|
||||
"namespacing",
|
||||
"netaddr",
|
||||
"netip",
|
||||
"netmap",
|
||||
"netns",
|
||||
"netstack",
|
||||
"nettype",
|
||||
"nfpms",
|
||||
"nhooyr",
|
||||
"nmcfg",
|
||||
"nolint",
|
||||
"nosec",
|
||||
"ntqry",
|
||||
"OIDC",
|
||||
"oneof",
|
||||
"opty",
|
||||
"paralleltest",
|
||||
"parameterscopeid",
|
||||
"pqtype",
|
||||
"prometheusmetrics",
|
||||
"promhttp",
|
||||
"protobuf",
|
||||
"provisionerd",
|
||||
"provisionerdserver",
|
||||
"provisionersdk",
|
||||
"ptty",
|
||||
"ptys",
|
||||
"ptytest",
|
||||
"quickstart",
|
||||
"reconfig",
|
||||
"replicasync",
|
||||
"retrier",
|
||||
"rpty",
|
||||
"SCIM",
|
||||
"sdkproto",
|
||||
"sdktrace",
|
||||
"Signup",
|
||||
"slogtest",
|
||||
"sourcemapped",
|
||||
"spinbutton",
|
||||
"Srcs",
|
||||
"stdbuf",
|
||||
"stretchr",
|
||||
"STTY",
|
||||
"stuntest",
|
||||
"tailbroker",
|
||||
"tailcfg",
|
||||
"tailexchange",
|
||||
"tailnet",
|
||||
"tailnettest",
|
||||
"Tailscale",
|
||||
"tanstack",
|
||||
"tbody",
|
||||
"TCGETS",
|
||||
"tcpip",
|
||||
"TCSETS",
|
||||
"templateversions",
|
||||
"testdata",
|
||||
"testid",
|
||||
"testutil",
|
||||
"tfexec",
|
||||
"tfjson",
|
||||
"tfplan",
|
||||
"tfstate",
|
||||
"thead",
|
||||
"tios",
|
||||
"tmpdir",
|
||||
"tokenconfig",
|
||||
"Topbar",
|
||||
"tparallel",
|
||||
"trialer",
|
||||
"trimprefix",
|
||||
"tsdial",
|
||||
"tslogger",
|
||||
"tstun",
|
||||
"turnconn",
|
||||
"typegen",
|
||||
"typesafe",
|
||||
"unconvert",
|
||||
"Untar",
|
||||
"Userspace",
|
||||
"VMID",
|
||||
"walkthrough",
|
||||
"weblinks",
|
||||
"webrtc",
|
||||
"wgcfg",
|
||||
"wgconfig",
|
||||
"wgengine",
|
||||
"wgmonitor",
|
||||
"wgnet",
|
||||
"workspaceagent",
|
||||
"workspaceagents",
|
||||
"workspaceapp",
|
||||
"workspaceapps",
|
||||
"workspacebuilds",
|
||||
"workspacename",
|
||||
"wsjson",
|
||||
"xerrors",
|
||||
"xlarge",
|
||||
"xsmall",
|
||||
"yamux"
|
||||
],
|
||||
"cSpell.ignorePaths": ["site/package.json", ".vscode/settings.json"],
|
||||
"emeraldwalk.runonsave": {
|
||||
"commands": [
|
||||
{
|
||||
"match": "database/queries/*.sql",
|
||||
"cmd": "make gen"
|
||||
},
|
||||
{
|
||||
"match": "provisionerd/proto/provisionerd.proto",
|
||||
"cmd": "make provisionerd/proto/provisionerd.pb.go"
|
||||
}
|
||||
]
|
||||
},
|
||||
"eslint.workingDirectories": ["./site"],
|
||||
"search.exclude": {
|
||||
"**.pb.go": true,
|
||||
"**/*.gen.json": true,
|
||||
"**/testdata/*": true,
|
||||
"coderd/apidoc/**": true,
|
||||
"docs/api/*.md": true,
|
||||
"docs/templates/*.md": true,
|
||||
"LICENSE": true,
|
||||
"scripts/metricsdocgen/metrics": true,
|
||||
"site/out/**": true,
|
||||
"site/storybook-static/**": true,
|
||||
"**.map": true,
|
||||
"pnpm-lock.yaml": true
|
||||
},
|
||||
// Ensure files always have a newline.
|
||||
"files.insertFinalNewline": true,
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintFlags": ["--fast"],
|
||||
"go.coverageDecorator": {
|
||||
"type": "gutter",
|
||||
"coveredGutterStyle": "blockgreen",
|
||||
"uncoveredGutterStyle": "blockred"
|
||||
},
|
||||
// The codersdk is used by coderd another other packages extensively.
|
||||
// To reduce redundancy in tests, it's covered by other packages.
|
||||
// Since package coverage pairing can't be defined, all packages cover
|
||||
// all other packages.
|
||||
"go.testFlags": ["-short", "-coverpkg=./..."],
|
||||
// We often use a version of TypeScript that's ahead of the version shipped
|
||||
// with VS Code.
|
||||
"typescript.tsdk": "./site/node_modules/typescript/lib",
|
||||
// Playwright tests in VSCode will open a browser to live "view" the test.
|
||||
"playwright.reuseBrowser": true
|
||||
"cSpell.words": [
|
||||
"afero",
|
||||
"agentsdk",
|
||||
"apps",
|
||||
"ASKPASS",
|
||||
"authcheck",
|
||||
"autostop",
|
||||
"awsidentity",
|
||||
"bodyclose",
|
||||
"buildinfo",
|
||||
"buildname",
|
||||
"circbuf",
|
||||
"cliflag",
|
||||
"cliui",
|
||||
"codecov",
|
||||
"coderd",
|
||||
"coderdenttest",
|
||||
"coderdtest",
|
||||
"codersdk",
|
||||
"contravariance",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"dbgen",
|
||||
"dbmem",
|
||||
"dbtype",
|
||||
"DERP",
|
||||
"derphttp",
|
||||
"derpmap",
|
||||
"devel",
|
||||
"devtunnel",
|
||||
"dflags",
|
||||
"drpc",
|
||||
"drpcconn",
|
||||
"drpcmux",
|
||||
"drpcserver",
|
||||
"Dsts",
|
||||
"embeddedpostgres",
|
||||
"enablements",
|
||||
"enterprisemeta",
|
||||
"errgroup",
|
||||
"eventsourcemock",
|
||||
"externalauth",
|
||||
"Failf",
|
||||
"fatih",
|
||||
"Formik",
|
||||
"gitauth",
|
||||
"gitsshkey",
|
||||
"goarch",
|
||||
"gographviz",
|
||||
"goleak",
|
||||
"gonet",
|
||||
"gossh",
|
||||
"gsyslog",
|
||||
"GTTY",
|
||||
"hashicorp",
|
||||
"hclsyntax",
|
||||
"httpapi",
|
||||
"httpmw",
|
||||
"idtoken",
|
||||
"Iflag",
|
||||
"incpatch",
|
||||
"initialisms",
|
||||
"ipnstate",
|
||||
"isatty",
|
||||
"Jobf",
|
||||
"Keygen",
|
||||
"kirsle",
|
||||
"Kubernetes",
|
||||
"ldflags",
|
||||
"magicsock",
|
||||
"manifoldco",
|
||||
"mapstructure",
|
||||
"mattn",
|
||||
"mitchellh",
|
||||
"moby",
|
||||
"namesgenerator",
|
||||
"namespacing",
|
||||
"netaddr",
|
||||
"netip",
|
||||
"netmap",
|
||||
"netns",
|
||||
"netstack",
|
||||
"nettype",
|
||||
"nfpms",
|
||||
"nhooyr",
|
||||
"nmcfg",
|
||||
"nolint",
|
||||
"nosec",
|
||||
"ntqry",
|
||||
"OIDC",
|
||||
"oneof",
|
||||
"opty",
|
||||
"paralleltest",
|
||||
"parameterscopeid",
|
||||
"pqtype",
|
||||
"prometheusmetrics",
|
||||
"promhttp",
|
||||
"protobuf",
|
||||
"provisionerd",
|
||||
"provisionerdserver",
|
||||
"provisionersdk",
|
||||
"ptty",
|
||||
"ptys",
|
||||
"ptytest",
|
||||
"quickstart",
|
||||
"reconfig",
|
||||
"replicasync",
|
||||
"retrier",
|
||||
"rpty",
|
||||
"SCIM",
|
||||
"sdkproto",
|
||||
"sdktrace",
|
||||
"Signup",
|
||||
"slogtest",
|
||||
"sourcemapped",
|
||||
"spinbutton",
|
||||
"Srcs",
|
||||
"stdbuf",
|
||||
"stretchr",
|
||||
"STTY",
|
||||
"stuntest",
|
||||
"tailbroker",
|
||||
"tailcfg",
|
||||
"tailexchange",
|
||||
"tailnet",
|
||||
"tailnettest",
|
||||
"Tailscale",
|
||||
"tanstack",
|
||||
"tbody",
|
||||
"TCGETS",
|
||||
"tcpip",
|
||||
"TCSETS",
|
||||
"templateversions",
|
||||
"testdata",
|
||||
"testid",
|
||||
"testutil",
|
||||
"tfexec",
|
||||
"tfjson",
|
||||
"tfplan",
|
||||
"tfstate",
|
||||
"thead",
|
||||
"tios",
|
||||
"tmpdir",
|
||||
"tokenconfig",
|
||||
"Topbar",
|
||||
"tparallel",
|
||||
"trialer",
|
||||
"trimprefix",
|
||||
"tsdial",
|
||||
"tslogger",
|
||||
"tstun",
|
||||
"turnconn",
|
||||
"typegen",
|
||||
"typesafe",
|
||||
"unconvert",
|
||||
"Untar",
|
||||
"Userspace",
|
||||
"VMID",
|
||||
"walkthrough",
|
||||
"weblinks",
|
||||
"webrtc",
|
||||
"wgcfg",
|
||||
"wgconfig",
|
||||
"wgengine",
|
||||
"wgmonitor",
|
||||
"wgnet",
|
||||
"workspaceagent",
|
||||
"workspaceagents",
|
||||
"workspaceapp",
|
||||
"workspaceapps",
|
||||
"workspacebuilds",
|
||||
"workspacename",
|
||||
"wsjson",
|
||||
"xerrors",
|
||||
"xlarge",
|
||||
"xsmall",
|
||||
"yamux"
|
||||
],
|
||||
"cSpell.ignorePaths": ["site/package.json", ".vscode/settings.json"],
|
||||
"emeraldwalk.runonsave": {
|
||||
"commands": [
|
||||
{
|
||||
"match": "database/queries/*.sql",
|
||||
"cmd": "make gen"
|
||||
},
|
||||
{
|
||||
"match": "provisionerd/proto/provisionerd.proto",
|
||||
"cmd": "make provisionerd/proto/provisionerd.pb.go"
|
||||
}
|
||||
]
|
||||
},
|
||||
"search.exclude": {
|
||||
"**.pb.go": true,
|
||||
"**/*.gen.json": true,
|
||||
"**/testdata/*": true,
|
||||
"coderd/apidoc/**": true,
|
||||
"docs/reference/api/*.md": true,
|
||||
"docs/reference/cli/*.md": true,
|
||||
"docs/templates/*.md": true,
|
||||
"LICENSE": true,
|
||||
"scripts/metricsdocgen/metrics": true,
|
||||
"site/out/**": true,
|
||||
"site/storybook-static/**": true,
|
||||
"**.map": true,
|
||||
"pnpm-lock.yaml": true
|
||||
},
|
||||
// Ensure files always have a newline.
|
||||
"files.insertFinalNewline": true,
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintFlags": ["--fast"],
|
||||
"go.coverageDecorator": {
|
||||
"type": "gutter",
|
||||
"coveredGutterStyle": "blockgreen",
|
||||
"uncoveredGutterStyle": "blockred"
|
||||
},
|
||||
// The codersdk is used by coderd another other packages extensively.
|
||||
// To reduce redundancy in tests, it's covered by other packages.
|
||||
// Since package coverage pairing can't be defined, all packages cover
|
||||
// all other packages.
|
||||
"go.testFlags": ["-short", "-coverpkg=./..."],
|
||||
// We often use a version of TypeScript that's ahead of the version shipped
|
||||
// with VS Code.
|
||||
"typescript.tsdk": "./site/node_modules/typescript/lib",
|
||||
// Playwright tests in VSCode will open a browser to live "view" the test.
|
||||
"playwright.reuseBrowser": true,
|
||||
|
||||
"[javascript][javascriptreact][json][jsonc][typescript][typescriptreact]": {
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
// "editor.codeActionsOnSave": {
|
||||
// "source.organizeImports.biome": "explicit"
|
||||
// }
|
||||
},
|
||||
|
||||
"[css][html][markdown][yaml]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -391,7 +391,7 @@ BOLD := $(shell tput bold 2>/dev/null)
|
||||
GREEN := $(shell tput setaf 2 2>/dev/null)
|
||||
RESET := $(shell tput sgr0 2>/dev/null)
|
||||
|
||||
fmt: fmt/eslint fmt/prettier fmt/terraform fmt/shfmt fmt/go
|
||||
fmt: fmt/ts fmt/go fmt/terraform fmt/shfmt fmt/prettier
|
||||
.PHONY: fmt
|
||||
|
||||
fmt/go:
|
||||
@@ -401,15 +401,19 @@ fmt/go:
|
||||
go run mvdan.cc/gofumpt@v0.4.0 -w -l .
|
||||
.PHONY: fmt/go
|
||||
|
||||
fmt/eslint:
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/eslint$(RESET)"
|
||||
fmt/ts:
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/ts$(RESET)"
|
||||
cd site
|
||||
pnpm run lint:fix
|
||||
.PHONY: fmt/eslint
|
||||
# Avoid writing files in CI to reduce file write activity
|
||||
ifdef CI
|
||||
pnpm run check --linter-enabled=false
|
||||
else
|
||||
pnpm run check:fix
|
||||
endif
|
||||
.PHONY: fmt/ts
|
||||
|
||||
fmt/prettier:
|
||||
fmt/prettier: .prettierignore
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/prettier$(RESET)"
|
||||
cd site
|
||||
# Avoid writing files in CI to reduce file write activity
|
||||
ifdef CI
|
||||
pnpm run format:check
|
||||
@@ -442,12 +446,12 @@ lint/site-icons:
|
||||
|
||||
lint/ts:
|
||||
cd site
|
||||
pnpm i && pnpm lint
|
||||
pnpm lint
|
||||
.PHONY: lint/ts
|
||||
|
||||
lint/go:
|
||||
./scripts/check_enterprise_imports.sh
|
||||
linter_ver=$(shell egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/Dockerfile | cut -d '=' -f 2)
|
||||
linter_ver=$(shell egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/contents/Dockerfile | cut -d '=' -f 2)
|
||||
go run github.com/golangci/golangci-lint/cmd/golangci-lint@v$$linter_ver run
|
||||
.PHONY: lint/go
|
||||
|
||||
@@ -487,17 +491,14 @@ gen: \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
codersdk/rbacresources_gen.go \
|
||||
site/src/api/rbacresources_gen.ts \
|
||||
site/src/api/rbacresourcesGenerated.ts \
|
||||
docs/admin/prometheus.md \
|
||||
docs/cli.md \
|
||||
docs/reference/cli/README.md \
|
||||
docs/admin/audit-logs.md \
|
||||
coderd/apidoc/swagger.json \
|
||||
.prettierignore.include \
|
||||
.prettierignore \
|
||||
provisioner/terraform/testdata/version \
|
||||
site/.prettierrc.yaml \
|
||||
site/.prettierignore \
|
||||
site/.eslintignore \
|
||||
site/e2e/provisionerGenerated.ts \
|
||||
site/src/theme/icons.json \
|
||||
examples/examples.gen.json \
|
||||
@@ -519,16 +520,13 @@ gen/mark-fresh:
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
codersdk/rbacresources_gen.go \
|
||||
site/src/api/rbacresources_gen.ts \
|
||||
site/src/api/rbacresourcesGenerated.ts \
|
||||
docs/admin/prometheus.md \
|
||||
docs/cli.md \
|
||||
docs/reference/cli/README.md \
|
||||
docs/admin/audit-logs.md \
|
||||
coderd/apidoc/swagger.json \
|
||||
.prettierignore.include \
|
||||
.prettierignore \
|
||||
site/.prettierrc.yaml \
|
||||
site/.prettierignore \
|
||||
site/.eslintignore \
|
||||
site/e2e/provisionerGenerated.ts \
|
||||
site/src/theme/icons.json \
|
||||
examples/examples.gen.json \
|
||||
@@ -603,7 +601,6 @@ provisionerd/proto/provisionerd.pb.go: provisionerd/proto/provisionerd.proto
|
||||
site/src/api/typesGenerated.ts: $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
go run ./scripts/apitypings/ > $@
|
||||
./scripts/pnpm_install.sh
|
||||
pnpm exec prettier --write "$@"
|
||||
|
||||
site/e2e/provisionerGenerated.ts: provisionerd/proto/provisionerd.pb.go provisionersdk/proto/provisioner.pb.go
|
||||
cd site
|
||||
@@ -613,7 +610,7 @@ site/e2e/provisionerGenerated.ts: provisionerd/proto/provisionerd.pb.go provisio
|
||||
site/src/theme/icons.json: $(wildcard scripts/gensite/*) $(wildcard site/static/icon/*)
|
||||
go run ./scripts/gensite/ -icons "$@"
|
||||
./scripts/pnpm_install.sh
|
||||
pnpm exec prettier --write "$@"
|
||||
pnpm -C site/ exec biome format --write src/theme/icons.json
|
||||
|
||||
examples/examples.gen.json: scripts/examplegen/main.go examples/examples.go $(shell find ./examples/templates)
|
||||
go run ./scripts/examplegen/main.go > examples/examples.gen.json
|
||||
@@ -624,8 +621,8 @@ coderd/rbac/object_gen.go: scripts/rbacgen/rbacobject.gotmpl scripts/rbacgen/mai
|
||||
codersdk/rbacresources_gen.go: scripts/rbacgen/codersdk.gotmpl scripts/rbacgen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
go run scripts/rbacgen/main.go codersdk > codersdk/rbacresources_gen.go
|
||||
|
||||
site/src/api/rbacresources_gen.ts: scripts/rbacgen/codersdk.gotmpl scripts/rbacgen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
go run scripts/rbacgen/main.go typescript > site/src/api/rbacresources_gen.ts
|
||||
site/src/api/rbacresourcesGenerated.ts: scripts/rbacgen/codersdk.gotmpl scripts/rbacgen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
go run scripts/rbacgen/main.go typescript > "$@"
|
||||
|
||||
|
||||
docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics
|
||||
@@ -633,10 +630,10 @@ docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/me
|
||||
./scripts/pnpm_install.sh
|
||||
pnpm exec prettier --write ./docs/admin/prometheus.md
|
||||
|
||||
docs/cli.md: scripts/clidocgen/main.go examples/examples.gen.json $(GO_SRC_FILES)
|
||||
docs/reference/cli/README.md: scripts/clidocgen/main.go examples/examples.gen.json $(GO_SRC_FILES)
|
||||
CI=true BASE_PATH="." go run ./scripts/clidocgen
|
||||
./scripts/pnpm_install.sh
|
||||
pnpm exec prettier --write ./docs/cli.md ./docs/cli/*.md ./docs/manifest.json
|
||||
pnpm exec prettier --write ./docs/reference/cli/README.md ./docs/reference/cli/*.md ./docs/manifest.json
|
||||
|
||||
docs/admin/audit-logs.md: coderd/database/querier.go scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
go run scripts/auditdocgen/main.go
|
||||
@@ -646,7 +643,7 @@ docs/admin/audit-logs.md: coderd/database/querier.go scripts/auditdocgen/main.go
|
||||
coderd/apidoc/swagger.json: $(shell find ./scripts/apidocgen $(FIND_EXCLUSIONS) -type f) $(wildcard coderd/*.go) $(wildcard enterprise/coderd/*.go) $(wildcard codersdk/*.go) $(wildcard enterprise/wsproxy/wsproxysdk/*.go) $(DB_GEN_FILES) .swaggo docs/manifest.json coderd/rbac/object_gen.go
|
||||
./scripts/apidocgen/generate.sh
|
||||
./scripts/pnpm_install.sh
|
||||
pnpm exec prettier --write ./docs/api ./docs/manifest.json ./coderd/apidoc/swagger.json
|
||||
pnpm exec prettier --write ./docs/reference/api ./docs/manifest.json ./coderd/apidoc/swagger.json
|
||||
|
||||
update-golden-files: \
|
||||
cli/testdata/.gen-golden \
|
||||
@@ -702,23 +699,6 @@ scripts/ci-report/testdata/.gen-golden: $(wildcard scripts/ci-report/testdata/*)
|
||||
go test ./scripts/ci-report -run=TestOutputMatchesGoldenFile -update
|
||||
touch "$@"
|
||||
|
||||
# Generate a prettierrc for the site package that uses relative paths for
|
||||
# overrides. This allows us to share the same prettier config between the
|
||||
# site and the root of the repo.
|
||||
site/.prettierrc.yaml: .prettierrc.yaml
|
||||
. ./scripts/lib.sh
|
||||
dependencies yq
|
||||
|
||||
echo "# Code generated by Makefile (../$<). DO NOT EDIT." > "$@"
|
||||
echo "" >> "$@"
|
||||
|
||||
# Replace all listed override files with relative paths inside site/.
|
||||
# - ./ -> ../
|
||||
# - ./site -> ./
|
||||
yq \
|
||||
'.overrides[].files |= map(. | sub("^./"; "") | sub("^"; "../") | sub("../site/"; "./") | sub("../!"; "!../"))' \
|
||||
"$<" >> "$@"
|
||||
|
||||
# Combine .gitignore with .prettierignore.include to generate .prettierignore.
|
||||
.prettierignore: .gitignore .prettierignore.include
|
||||
echo "# Code generated by Makefile ($^). DO NOT EDIT." > "$@"
|
||||
@@ -728,40 +708,6 @@ site/.prettierrc.yaml: .prettierrc.yaml
|
||||
cat "$$f" >> "$@"
|
||||
done
|
||||
|
||||
# Generate ignore files based on gitignore into the site directory. We turn all
|
||||
# rules into relative paths for the `site/` directory (where applicable),
|
||||
# following the pattern format defined by git:
|
||||
# https://git-scm.com/docs/gitignore#_pattern_format
|
||||
#
|
||||
# This is done for compatibility reasons, see:
|
||||
# https://github.com/prettier/prettier/issues/8048
|
||||
# https://github.com/prettier/prettier/issues/8506
|
||||
# https://github.com/prettier/prettier/issues/8679
|
||||
site/.eslintignore site/.prettierignore: .prettierignore Makefile
|
||||
rm -f "$@"
|
||||
touch "$@"
|
||||
# Skip generated by header, inherit `.prettierignore` header as-is.
|
||||
while read -r rule; do
|
||||
# Remove leading ! if present to simplify rule, added back at the end.
|
||||
tmp="$${rule#!}"
|
||||
ignore="$${rule%"$$tmp"}"
|
||||
rule="$$tmp"
|
||||
case "$$rule" in
|
||||
# Comments or empty lines (include).
|
||||
\#*|'') ;;
|
||||
# Generic rules (include).
|
||||
\*\**) ;;
|
||||
# Site prefixed rules (include).
|
||||
site/*) rule="$${rule#site/}";;
|
||||
./site/*) rule="$${rule#./site/}";;
|
||||
# Rules that are non-generic and don't start with site (rewrite).
|
||||
/*) rule=.."$$rule";;
|
||||
*/?*) rule=../"$$rule";;
|
||||
*) ;;
|
||||
esac
|
||||
echo "$${ignore}$${rule}" >> "$@"
|
||||
done < "$<"
|
||||
|
||||
test:
|
||||
$(GIT_FLAGS) gotestsum --format standard-quiet -- -v -short -count=1 ./...
|
||||
.PHONY: test
|
||||
|
||||
+18
-6
@@ -588,10 +588,12 @@ func (a *agent) reportMetadata(ctx context.Context, conn drpc.Conn) error {
|
||||
updatedMetadata[mr.key] = mr.result
|
||||
continue
|
||||
case err := <-reportError:
|
||||
a.logger.Debug(ctx, "batch update metadata complete", slog.Error(err))
|
||||
logMsg := "batch update metadata complete"
|
||||
if err != nil {
|
||||
a.logger.Debug(ctx, logMsg, slog.Error(err))
|
||||
return xerrors.Errorf("failed to report metadata: %w", err)
|
||||
}
|
||||
a.logger.Debug(ctx, logMsg)
|
||||
reportInFlight = false
|
||||
case <-report:
|
||||
if len(updatedMetadata) == 0 {
|
||||
@@ -1667,13 +1669,12 @@ func (a *agent) manageProcessPriority(ctx context.Context, debouncer *logDebounc
|
||||
}
|
||||
|
||||
score, niceErr := proc.Niceness(a.syscaller)
|
||||
if niceErr != nil && !xerrors.Is(niceErr, os.ErrPermission) {
|
||||
if !isBenignProcessErr(niceErr) {
|
||||
debouncer.Warn(ctx, "unable to get proc niceness",
|
||||
slog.F("cmd", proc.Cmd()),
|
||||
slog.F("pid", proc.PID),
|
||||
slog.Error(niceErr),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
// We only want processes that don't have a nice value set
|
||||
@@ -1687,7 +1688,7 @@ func (a *agent) manageProcessPriority(ctx context.Context, debouncer *logDebounc
|
||||
|
||||
if niceErr == nil {
|
||||
err := proc.SetNiceness(a.syscaller, niceness)
|
||||
if err != nil && !xerrors.Is(err, os.ErrPermission) {
|
||||
if !isBenignProcessErr(err) {
|
||||
debouncer.Warn(ctx, "unable to set proc niceness",
|
||||
slog.F("cmd", proc.Cmd()),
|
||||
slog.F("pid", proc.PID),
|
||||
@@ -1701,7 +1702,7 @@ func (a *agent) manageProcessPriority(ctx context.Context, debouncer *logDebounc
|
||||
if oomScore != unsetOOMScore && oomScore != proc.OOMScoreAdj && !isCustomOOMScore(agentScore, proc) {
|
||||
oomScoreStr := strconv.Itoa(oomScore)
|
||||
err := afero.WriteFile(a.filesystem, fmt.Sprintf("/proc/%d/oom_score_adj", proc.PID), []byte(oomScoreStr), 0o644)
|
||||
if err != nil && !xerrors.Is(err, os.ErrPermission) {
|
||||
if !isBenignProcessErr(err) {
|
||||
debouncer.Warn(ctx, "unable to set oom_score_adj",
|
||||
slog.F("cmd", proc.Cmd()),
|
||||
slog.F("pid", proc.PID),
|
||||
@@ -1785,7 +1786,7 @@ func (a *agent) HandleHTTPDebugLogs(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Limit to 10MB.
|
||||
// Limit to 10MiB.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err = io.Copy(w, io.LimitReader(f, 10*1024*1024))
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
@@ -2137,3 +2138,14 @@ func (l *logDebouncer) log(ctx context.Context, level slog.Level, msg string, fi
|
||||
}
|
||||
l.messages[msg] = time.Now()
|
||||
}
|
||||
|
||||
func isBenignProcessErr(err error) bool {
|
||||
return err != nil &&
|
||||
(xerrors.Is(err, os.ErrNotExist) ||
|
||||
xerrors.Is(err, os.ErrPermission) ||
|
||||
isNoSuchProcessErr(err))
|
||||
}
|
||||
|
||||
func isNoSuchProcessErr(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), "no such process")
|
||||
}
|
||||
|
||||
@@ -45,8 +45,7 @@ func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) {
|
||||
|
||||
cmdline, err := afero.ReadFile(fs, filepath.Join(defaultProcDir, entry, "cmdline"))
|
||||
if err != nil {
|
||||
var errNo syscall.Errno
|
||||
if xerrors.As(err, &errNo) && errNo == syscall.EPERM {
|
||||
if isBenignError(err) {
|
||||
continue
|
||||
}
|
||||
return nil, xerrors.Errorf("read cmdline: %w", err)
|
||||
@@ -54,7 +53,7 @@ func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) {
|
||||
|
||||
oomScore, err := afero.ReadFile(fs, filepath.Join(defaultProcDir, entry, "oom_score_adj"))
|
||||
if err != nil {
|
||||
if xerrors.Is(err, os.ErrPermission) {
|
||||
if isBenignError(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -124,3 +123,12 @@ func (p *Process) Cmd() string {
|
||||
func (p *Process) cmdLine() []string {
|
||||
return strings.Split(p.CmdLine, "\x00")
|
||||
}
|
||||
|
||||
func isBenignError(err error) bool {
|
||||
var errno syscall.Errno
|
||||
if !xerrors.As(err, &errno) {
|
||||
return false
|
||||
}
|
||||
|
||||
return errno == syscall.ESRCH || errno == syscall.EPERM || xerrors.Is(err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ func (a *agent) apiHandler() http.Handler {
|
||||
}
|
||||
promHandler := PrometheusMetricsHandler(a.prometheusRegistry, a.logger)
|
||||
r.Get("/api/v0/listening-ports", lp.handler)
|
||||
r.Get("/api/v0/netcheck", a.HandleNetcheck)
|
||||
r.Get("/debug/logs", a.HandleHTTPDebugLogs)
|
||||
r.Get("/debug/magicsock", a.HandleHTTPDebugMagicsock)
|
||||
r.Get("/debug/magicsock/debug-logging/{state}", a.HandleHTTPMagicsockDebugLoggingState)
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/healthcheck/health"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/healthsdk"
|
||||
)
|
||||
|
||||
func (a *agent) HandleNetcheck(rw http.ResponseWriter, r *http.Request) {
|
||||
ni := a.TailnetConn().GetNetInfo()
|
||||
|
||||
ifReport, err := healthsdk.RunInterfacesReport()
|
||||
if err != nil {
|
||||
httpapi.Write(r.Context(), rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to run interfaces report",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(r.Context(), rw, http.StatusOK, healthsdk.AgentNetcheckReport{
|
||||
BaseReport: healthsdk.BaseReport{
|
||||
Severity: health.SeverityOK,
|
||||
},
|
||||
NetInfo: ni,
|
||||
Interfaces: ifReport,
|
||||
})
|
||||
}
|
||||
@@ -50,6 +50,8 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
|
||||
slogJSONPath string
|
||||
slogStackdriverPath string
|
||||
blockFileTransfer bool
|
||||
agentHeaderCommand string
|
||||
agentHeader []string
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Use: "agent",
|
||||
@@ -176,6 +178,14 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
|
||||
// with large payloads can take a bit. e.g. startup scripts
|
||||
// may take a while to insert.
|
||||
client.SDK.HTTPClient.Timeout = 30 * time.Second
|
||||
// Attach header transport so we process --agent-header and
|
||||
// --agent-header-command flags
|
||||
headerTransport, err := headerTransport(ctx, r.agentURL, agentHeader, agentHeaderCommand)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("configure header transport: %w", err)
|
||||
}
|
||||
headerTransport.Transport = client.SDK.HTTPClient.Transport
|
||||
client.SDK.HTTPClient.Transport = headerTransport
|
||||
|
||||
// Enable pprof handler
|
||||
// This prevents the pprof import from being accidentally deleted.
|
||||
@@ -361,6 +371,18 @@ func (r *RootCmd) workspaceAgent() *serpent.Command {
|
||||
Value: serpent.StringOf(&pprofAddress),
|
||||
Description: "The address to serve pprof.",
|
||||
},
|
||||
{
|
||||
Flag: "agent-header-command",
|
||||
Env: "CODER_AGENT_HEADER_COMMAND",
|
||||
Value: serpent.StringOf(&agentHeaderCommand),
|
||||
Description: "An external command that outputs additional HTTP headers added to all requests. The command must output each header as `key=value` on its own line.",
|
||||
},
|
||||
{
|
||||
Flag: "agent-header",
|
||||
Env: "CODER_AGENT_HEADER",
|
||||
Value: serpent.StringArrayOf(&agentHeader),
|
||||
Description: "Additional HTTP headers added to all requests. Provide as " + `key=value` + ". Can be specified multiple times.",
|
||||
},
|
||||
{
|
||||
Flag: "no-reap",
|
||||
|
||||
|
||||
@@ -3,10 +3,13 @@ package cli_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -229,6 +232,43 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
require.Equal(t, codersdk.AgentSubsystemEnvbox, resources[0].Agents[0].Subsystems[0])
|
||||
require.Equal(t, codersdk.AgentSubsystemExectrace, resources[0].Agents[0].Subsystems[1])
|
||||
})
|
||||
t.Run("Header", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var url string
|
||||
var called int64
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, "wow", r.Header.Get("X-Testing"))
|
||||
assert.Equal(t, "Ethan was Here!", r.Header.Get("Cool-Header"))
|
||||
assert.Equal(t, "very-wow-"+url, r.Header.Get("X-Process-Testing"))
|
||||
assert.Equal(t, "more-wow", r.Header.Get("X-Process-Testing2"))
|
||||
atomic.AddInt64(&called, 1)
|
||||
w.WriteHeader(http.StatusGone)
|
||||
}))
|
||||
defer srv.Close()
|
||||
url = srv.URL
|
||||
coderURLEnv := "$CODER_URL"
|
||||
if runtime.GOOS == "windows" {
|
||||
coderURLEnv = "%CODER_URL%"
|
||||
}
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", "fake-token",
|
||||
"--agent-url", srv.URL,
|
||||
"--log-dir", logDir,
|
||||
"--agent-header", "X-Testing=wow",
|
||||
"--agent-header", "Cool-Header=Ethan was Here!",
|
||||
"--agent-header-command", "printf X-Process-Testing=very-wow-"+coderURLEnv+"'\\r\\n'X-Process-Testing2=more-wow",
|
||||
)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
require.Eventually(t, func() bool {
|
||||
return atomic.LoadInt64(&called) > 0
|
||||
}, testutil.WaitShort, testutil.IntervalFast)
|
||||
})
|
||||
}
|
||||
|
||||
func matchAgentWithVersion(rs []codersdk.WorkspaceResource) bool {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
@@ -183,11 +184,11 @@ func prepareTestData(t *testing.T) (*codersdk.Client, map[string]string) {
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
firstUser := coderdtest.CreateFirstUser(t, rootClient)
|
||||
secondUser, err := rootClient.CreateUser(ctx, codersdk.CreateUserRequest{
|
||||
Email: "testuser2@coder.com",
|
||||
Username: "testuser2",
|
||||
Password: coderdtest.FirstUserParams.Password,
|
||||
OrganizationID: firstUser.OrganizationID,
|
||||
secondUser, err := rootClient.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
Email: "testuser2@coder.com",
|
||||
Username: "testuser2",
|
||||
Password: coderdtest.FirstUserParams.Password,
|
||||
OrganizationIDs: []uuid.UUID{firstUser.OrganizationID},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
version := coderdtest.CreateTemplateVersion(t, rootClient, firstUser.OrganizationID, nil)
|
||||
|
||||
@@ -10,8 +10,11 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/healthsdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
)
|
||||
|
||||
@@ -346,3 +349,102 @@ func PeerDiagnostics(w io.Writer, d tailnet.PeerDiagnostics) {
|
||||
_, _ = fmt.Fprint(w, "✘ Wireguard is not connected\n")
|
||||
}
|
||||
}
|
||||
|
||||
type ConnDiags struct {
|
||||
ConnInfo workspacesdk.AgentConnectionInfo
|
||||
PingP2P bool
|
||||
DisableDirect bool
|
||||
LocalNetInfo *tailcfg.NetInfo
|
||||
LocalInterfaces *healthsdk.InterfacesReport
|
||||
AgentNetcheck *healthsdk.AgentNetcheckReport
|
||||
ClientIPIsAWS bool
|
||||
AgentIPIsAWS bool
|
||||
Verbose bool
|
||||
// TODO: More diagnostics
|
||||
}
|
||||
|
||||
func (d ConnDiags) Write(w io.Writer) {
|
||||
_, _ = fmt.Fprintln(w, "")
|
||||
general, client, agent := d.splitDiagnostics()
|
||||
for _, msg := range general {
|
||||
_, _ = fmt.Fprintln(w, msg)
|
||||
}
|
||||
if len(client) > 0 {
|
||||
_, _ = fmt.Fprint(w, "Possible client-side issues with direct connection:\n\n")
|
||||
for _, msg := range client {
|
||||
_, _ = fmt.Fprintf(w, " - %s\n\n", msg)
|
||||
}
|
||||
}
|
||||
if len(agent) > 0 {
|
||||
_, _ = fmt.Fprint(w, "Possible agent-side issues with direct connections:\n\n")
|
||||
for _, msg := range agent {
|
||||
_, _ = fmt.Fprintf(w, " - %s\n\n", msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d ConnDiags) splitDiagnostics() (general, client, agent []string) {
|
||||
if d.PingP2P {
|
||||
general = append(general, "✔ You are connected directly (p2p)")
|
||||
} else {
|
||||
general = append(general, "❗ You are connected via a DERP relay, not directly (p2p)")
|
||||
}
|
||||
|
||||
if d.AgentNetcheck != nil {
|
||||
for _, msg := range d.AgentNetcheck.Interfaces.Warnings {
|
||||
agent = append(agent, msg.Message)
|
||||
}
|
||||
}
|
||||
|
||||
if d.LocalInterfaces != nil {
|
||||
for _, msg := range d.LocalInterfaces.Warnings {
|
||||
client = append(client, msg.Message)
|
||||
}
|
||||
}
|
||||
|
||||
if d.PingP2P && !d.Verbose {
|
||||
return general, client, agent
|
||||
}
|
||||
|
||||
if d.DisableDirect {
|
||||
general = append(general, "❗ Direct connections are disabled locally, by `--disable-direct` or `CODER_DISABLE_DIRECT`")
|
||||
if !d.Verbose {
|
||||
return general, client, agent
|
||||
}
|
||||
}
|
||||
|
||||
if d.ConnInfo.DisableDirectConnections {
|
||||
general = append(general, "❗ Your Coder administrator has blocked direct connections")
|
||||
if !d.Verbose {
|
||||
return general, client, agent
|
||||
}
|
||||
}
|
||||
|
||||
if !d.ConnInfo.DERPMap.HasSTUN() {
|
||||
general = append(general, "The DERP map is not configured to use STUN")
|
||||
} else if d.LocalNetInfo != nil && !d.LocalNetInfo.UDP {
|
||||
client = append(client, "Client could not connect to STUN over UDP")
|
||||
}
|
||||
|
||||
if d.LocalNetInfo != nil && d.LocalNetInfo.MappingVariesByDestIP.EqualBool(true) {
|
||||
client = append(client, "Client is potentially behind a hard NAT, as multiple endpoints were retrieved from different STUN servers")
|
||||
}
|
||||
|
||||
if d.AgentNetcheck != nil && d.AgentNetcheck.NetInfo != nil {
|
||||
if d.AgentNetcheck.NetInfo.MappingVariesByDestIP.EqualBool(true) {
|
||||
agent = append(agent, "Agent is potentially behind a hard NAT, as multiple endpoints were retrieved from different STUN servers")
|
||||
}
|
||||
if !d.AgentNetcheck.NetInfo.UDP {
|
||||
agent = append(agent, "Agent could not connect to STUN over UDP")
|
||||
}
|
||||
}
|
||||
|
||||
if d.ClientIPIsAWS {
|
||||
client = append(client, "Client IP address is within an AWS range (AWS uses hard NAT)")
|
||||
}
|
||||
|
||||
if d.AgentIPIsAWS {
|
||||
agent = append(agent, "Agent IP address is within an AWS range (AWS uses hard NAT)")
|
||||
}
|
||||
return general, client, agent
|
||||
}
|
||||
|
||||
@@ -20,8 +20,11 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/healthcheck/health"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/healthsdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/serpent"
|
||||
@@ -672,3 +675,223 @@ func TestPeerDiagnostics(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnDiagnostics(t *testing.T) {
|
||||
t.Parallel()
|
||||
testCases := []struct {
|
||||
name string
|
||||
diags cliui.ConnDiags
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Direct",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
PingP2P: true,
|
||||
LocalNetInfo: &tailcfg.NetInfo{},
|
||||
},
|
||||
want: []string{
|
||||
`✔ You are connected directly (p2p)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DirectBlocked",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
DisableDirectConnections: true,
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`❗ Your Coder administrator has blocked direct connections`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NoStun",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
LocalNetInfo: &tailcfg.NetInfo{},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`The DERP map is not configured to use STUN`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ClientHasStunNoUDP",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
999: {
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{
|
||||
STUNPort: 1337,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
LocalNetInfo: &tailcfg.NetInfo{
|
||||
UDP: false,
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Client could not connect to STUN over UDP`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AgentHasStunNoUDP",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
999: {
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{
|
||||
STUNPort: 1337,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
AgentNetcheck: &healthsdk.AgentNetcheckReport{
|
||||
NetInfo: &tailcfg.NetInfo{
|
||||
UDP: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Agent could not connect to STUN over UDP`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ClientHardNat",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
LocalNetInfo: &tailcfg.NetInfo{
|
||||
MappingVariesByDestIP: "true",
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Client is potentially behind a hard NAT, as multiple endpoints were retrieved from different STUN servers`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AgentHardNat",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
PingP2P: false,
|
||||
LocalNetInfo: &tailcfg.NetInfo{},
|
||||
AgentNetcheck: &healthsdk.AgentNetcheckReport{
|
||||
NetInfo: &tailcfg.NetInfo{MappingVariesByDestIP: "true"},
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Agent is potentially behind a hard NAT, as multiple endpoints were retrieved from different STUN servers`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AgentInterfaceWarnings",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
PingP2P: true,
|
||||
AgentNetcheck: &healthsdk.AgentNetcheckReport{
|
||||
Interfaces: healthsdk.InterfacesReport{
|
||||
BaseReport: healthsdk.BaseReport{
|
||||
Warnings: []health.Message{
|
||||
health.Messagef(health.CodeInterfaceSmallMTU, "Network interface eth0 has MTU 1280, (less than 1378), which may degrade the quality of direct connections"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`✔ You are connected directly (p2p)`,
|
||||
`Network interface eth0 has MTU 1280, (less than 1378), which may degrade the quality of direct connections`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "LocalInterfaceWarnings",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
PingP2P: true,
|
||||
LocalInterfaces: &healthsdk.InterfacesReport{
|
||||
BaseReport: healthsdk.BaseReport{
|
||||
Warnings: []health.Message{
|
||||
health.Messagef(health.CodeInterfaceSmallMTU, "Network interface eth1 has MTU 1310, (less than 1378), which may degrade the quality of direct connections"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
`✔ You are connected directly (p2p)`,
|
||||
`Network interface eth1 has MTU 1310, (less than 1378), which may degrade the quality of direct connections`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ClientAWSIP",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
ClientIPIsAWS: true,
|
||||
AgentIPIsAWS: false,
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Client IP address is within an AWS range (AWS uses hard NAT)`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AgentAWSIP",
|
||||
diags: cliui.ConnDiags{
|
||||
ConnInfo: workspacesdk.AgentConnectionInfo{
|
||||
DERPMap: &tailcfg.DERPMap{},
|
||||
},
|
||||
ClientIPIsAWS: false,
|
||||
AgentIPIsAWS: true,
|
||||
},
|
||||
want: []string{
|
||||
`❗ You are connected via a DERP relay, not directly (p2p)`,
|
||||
`Agent IP address is within an AWS range (AWS uses hard NAT)`,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
defer w.Close()
|
||||
tc.diags.Write(w)
|
||||
}()
|
||||
bytes, err := io.ReadAll(r)
|
||||
require.NoError(t, err)
|
||||
output := string(bytes)
|
||||
for _, want := range tc.want {
|
||||
require.Contains(t, output, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
+4
-4
@@ -65,8 +65,8 @@ func (f *OutputFormatter) AttachOptions(opts *serpent.OptionSet) {
|
||||
Flag: "output",
|
||||
FlagShorthand: "o",
|
||||
Default: f.formats[0].ID(),
|
||||
Value: serpent.StringOf(&f.formatID),
|
||||
Description: "Output format. Available formats: " + strings.Join(formatNames, ", ") + ".",
|
||||
Value: serpent.EnumOf(&f.formatID, formatNames...),
|
||||
Description: "Output format.",
|
||||
},
|
||||
)
|
||||
}
|
||||
@@ -136,8 +136,8 @@ func (f *tableFormat) AttachOptions(opts *serpent.OptionSet) {
|
||||
Flag: "column",
|
||||
FlagShorthand: "c",
|
||||
Default: strings.Join(f.defaultColumns, ","),
|
||||
Value: serpent.StringArrayOf(&f.columns),
|
||||
Description: "Columns to display in table output. Available columns: " + strings.Join(f.allColumns, ", ") + ".",
|
||||
Value: serpent.EnumArrayOf(&f.columns, f.allColumns...),
|
||||
Description: "Columns to display in table output.",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -106,11 +106,11 @@ func Test_OutputFormatter(t *testing.T) {
|
||||
|
||||
fs := cmd.Options.FlagSet()
|
||||
|
||||
selected, err := fs.GetString("output")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "json", selected)
|
||||
selected := cmd.Options.ByFlag("output")
|
||||
require.NotNil(t, selected)
|
||||
require.Equal(t, "json", selected.Value.String())
|
||||
usage := fs.FlagUsages()
|
||||
require.Contains(t, usage, "Available formats: json, foo")
|
||||
require.Contains(t, usage, "Output format.")
|
||||
require.Contains(t, usage, "foo flag 1234")
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -129,11 +129,10 @@ func Test_OutputFormatter(t *testing.T) {
|
||||
require.Equal(t, "foo", out)
|
||||
require.EqualValues(t, 1, atomic.LoadInt64(&called))
|
||||
|
||||
require.NoError(t, fs.Set("output", "bar"))
|
||||
require.Error(t, fs.Set("output", "bar"))
|
||||
out, err = f.Format(ctx, data)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "bar")
|
||||
require.Equal(t, "", out)
|
||||
require.EqualValues(t, 1, atomic.LoadInt64(&called))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foo", out)
|
||||
require.EqualValues(t, 2, atomic.LoadInt64(&called))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -0,0 +1,114 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const AWSIPRangesURL = "https://ip-ranges.amazonaws.com/ip-ranges.json"
|
||||
|
||||
type awsIPv4Prefix struct {
|
||||
Prefix string `json:"ip_prefix"`
|
||||
Region string `json:"region"`
|
||||
Service string `json:"service"`
|
||||
NetworkBorderGroup string `json:"network_border_group"`
|
||||
}
|
||||
|
||||
type awsIPv6Prefix struct {
|
||||
Prefix string `json:"ipv6_prefix"`
|
||||
Region string `json:"region"`
|
||||
Service string `json:"service"`
|
||||
NetworkBorderGroup string `json:"network_border_group"`
|
||||
}
|
||||
|
||||
type AWSIPRanges struct {
|
||||
V4 []netip.Prefix
|
||||
V6 []netip.Prefix
|
||||
}
|
||||
|
||||
type awsIPRangesResponse struct {
|
||||
SyncToken string `json:"syncToken"`
|
||||
CreateDate string `json:"createDate"`
|
||||
IPV4Prefixes []awsIPv4Prefix `json:"prefixes"`
|
||||
IPV6Prefixes []awsIPv6Prefix `json:"ipv6_prefixes"`
|
||||
}
|
||||
|
||||
func FetchAWSIPRanges(ctx context.Context, url string) (*AWSIPRanges, error) {
|
||||
client := &http.Client{}
|
||||
reqCtx, reqCancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer reqCancel()
|
||||
req, _ := http.NewRequestWithContext(reqCtx, http.MethodGet, url, nil)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
return nil, xerrors.Errorf("unexpected status code %d: %s", resp.StatusCode, b)
|
||||
}
|
||||
|
||||
var body awsIPRangesResponse
|
||||
err = json.NewDecoder(resp.Body).Decode(&body)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("json decode: %w", err)
|
||||
}
|
||||
|
||||
out := &AWSIPRanges{
|
||||
V4: make([]netip.Prefix, 0, len(body.IPV4Prefixes)),
|
||||
V6: make([]netip.Prefix, 0, len(body.IPV6Prefixes)),
|
||||
}
|
||||
|
||||
for _, p := range body.IPV4Prefixes {
|
||||
prefix, err := netip.ParsePrefix(p.Prefix)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse ip prefix: %w", err)
|
||||
}
|
||||
if prefix.Addr().Is6() {
|
||||
return nil, xerrors.Errorf("ipv4 prefix contains ipv6 address: %s", p.Prefix)
|
||||
}
|
||||
out.V4 = append(out.V4, prefix)
|
||||
}
|
||||
|
||||
for _, p := range body.IPV6Prefixes {
|
||||
prefix, err := netip.ParsePrefix(p.Prefix)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse ip prefix: %w", err)
|
||||
}
|
||||
if prefix.Addr().Is4() {
|
||||
return nil, xerrors.Errorf("ipv6 prefix contains ipv4 address: %s", p.Prefix)
|
||||
}
|
||||
out.V6 = append(out.V6, prefix)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// CheckIP checks if the given IP address is an AWS IP.
|
||||
func (r *AWSIPRanges) CheckIP(ip netip.Addr) bool {
|
||||
if ip.IsLoopback() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() || ip.IsPrivate() {
|
||||
return false
|
||||
}
|
||||
|
||||
if ip.Is4() {
|
||||
for _, p := range r.V4 {
|
||||
if p.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, p := range r.V6 {
|
||||
if p.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -0,0 +1,96 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestIPV4Check(t *testing.T) {
|
||||
t.Parallel()
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(context.Background(), w, http.StatusOK, awsIPRangesResponse{
|
||||
IPV4Prefixes: []awsIPv4Prefix{
|
||||
{
|
||||
Prefix: "3.24.0.0/14",
|
||||
},
|
||||
{
|
||||
Prefix: "15.230.15.29/32",
|
||||
},
|
||||
{
|
||||
Prefix: "47.128.82.100/31",
|
||||
},
|
||||
},
|
||||
IPV6Prefixes: []awsIPv6Prefix{
|
||||
{
|
||||
Prefix: "2600:9000:5206::/48",
|
||||
},
|
||||
{
|
||||
Prefix: "2406:da70:8800::/40",
|
||||
},
|
||||
{
|
||||
Prefix: "2600:1f68:5000::/40",
|
||||
},
|
||||
},
|
||||
})
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ranges, err := FetchAWSIPRanges(ctx, srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("Private/IPV4", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("192.168.0.1")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.False(t, isAws)
|
||||
})
|
||||
|
||||
t.Run("AWS/IPV4", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("3.25.61.113")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.True(t, isAws)
|
||||
})
|
||||
|
||||
t.Run("NonAWS/IPV4", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("159.196.123.40")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.False(t, isAws)
|
||||
})
|
||||
|
||||
t.Run("Private/IPV6", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("::1")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.False(t, isAws)
|
||||
})
|
||||
|
||||
t.Run("AWS/IPV6", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("2600:9000:5206:0001:0000:0000:0000:0001")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.True(t, isAws)
|
||||
})
|
||||
|
||||
t.Run("NonAWS/IPV6", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ip, err := netip.ParseAddr("2403:5807:885f:0:a544:49d4:58f8:aedf")
|
||||
require.NoError(t, err)
|
||||
isAws := ranges.CheckIP(ip)
|
||||
require.False(t, isAws)
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/serpent"
|
||||
"github.com/coder/serpent/completion"
|
||||
)
|
||||
|
||||
func (*RootCmd) completion() *serpent.Command {
|
||||
var shellName string
|
||||
var printOutput bool
|
||||
shellOptions := completion.ShellOptions(&shellName)
|
||||
return &serpent.Command{
|
||||
Use: "completion",
|
||||
Short: "Install or update shell completion scripts for the detected or chosen shell.",
|
||||
Options: []serpent.Option{
|
||||
{
|
||||
Flag: "shell",
|
||||
FlagShorthand: "s",
|
||||
Description: "The shell to install completion for.",
|
||||
Value: shellOptions,
|
||||
},
|
||||
{
|
||||
Flag: "print",
|
||||
Description: "Print the completion script instead of installing it.",
|
||||
FlagShorthand: "p",
|
||||
|
||||
Value: serpent.BoolOf(&printOutput),
|
||||
},
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
if shellName != "" {
|
||||
shell, err := completion.ShellByName(shellName, inv.Command.Parent.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if printOutput {
|
||||
return shell.WriteCompletion(inv.Stdout)
|
||||
}
|
||||
return installCompletion(inv, shell)
|
||||
}
|
||||
shell, err := completion.DetectUserShell(inv.Command.Parent.Name())
|
||||
if err == nil {
|
||||
return installCompletion(inv, shell)
|
||||
}
|
||||
if !isTTYOut(inv) {
|
||||
return xerrors.New("could not detect the current shell, please specify one with --shell or run interactively")
|
||||
}
|
||||
// Silently continue to the shell selection if detecting failed in interactive mode
|
||||
choice, err := cliui.Select(inv, cliui.SelectOptions{
|
||||
Message: "Select a shell to install completion for:",
|
||||
Options: shellOptions.Choices,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
shellChoice, err := completion.ShellByName(choice, inv.Command.Parent.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if printOutput {
|
||||
return shellChoice.WriteCompletion(inv.Stdout)
|
||||
}
|
||||
return installCompletion(inv, shellChoice)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func installCompletion(inv *serpent.Invocation, shell completion.Shell) error {
|
||||
path, err := shell.InstallPath()
|
||||
if err != nil {
|
||||
cliui.Error(inv.Stderr, fmt.Sprintf("Failed to determine completion path %v", err))
|
||||
return shell.WriteCompletion(inv.Stdout)
|
||||
}
|
||||
if !isTTYOut(inv) {
|
||||
return shell.WriteCompletion(inv.Stdout)
|
||||
}
|
||||
choice, err := cliui.Select(inv, cliui.SelectOptions{
|
||||
Options: []string{
|
||||
"Confirm",
|
||||
"Print to terminal",
|
||||
},
|
||||
Message: fmt.Sprintf("Install completion for %s at %s?", shell.Name(), path),
|
||||
HideSearch: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if choice == "Print to terminal" {
|
||||
return shell.WriteCompletion(inv.Stdout)
|
||||
}
|
||||
return completion.InstallShellCompletion(shell)
|
||||
}
|
||||
+2
-45
@@ -17,6 +17,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/cli/safeexec"
|
||||
"github.com/natefinch/atomic"
|
||||
"github.com/pkg/diff"
|
||||
"github.com/pkg/diff/write"
|
||||
"golang.org/x/exp/constraints"
|
||||
@@ -524,7 +525,7 @@ func (r *RootCmd) configSSH() *serpent.Command {
|
||||
}
|
||||
|
||||
if !bytes.Equal(configRaw, configModified) {
|
||||
err = writeWithTempFileAndMove(sshConfigFile, bytes.NewReader(configModified))
|
||||
err = atomic.WriteFile(sshConfigFile, bytes.NewReader(configModified))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write ssh config failed: %w", err)
|
||||
}
|
||||
@@ -758,50 +759,6 @@ func sshConfigSplitOnCoderSection(data []byte) (before, section []byte, after []
|
||||
return data, nil, nil, nil
|
||||
}
|
||||
|
||||
// writeWithTempFileAndMove writes to a temporary file in the same
|
||||
// directory as path and renames the temp file to the file provided in
|
||||
// path. This ensure we avoid trashing the file we are writing due to
|
||||
// unforeseen circumstance like filesystem full, command killed, etc.
|
||||
func writeWithTempFileAndMove(path string, r io.Reader) (err error) {
|
||||
dir := filepath.Dir(path)
|
||||
name := filepath.Base(path)
|
||||
|
||||
// Ensure that e.g. the ~/.ssh directory exists.
|
||||
if err = os.MkdirAll(dir, 0o700); err != nil {
|
||||
return xerrors.Errorf("create directory: %w", err)
|
||||
}
|
||||
|
||||
// Create a tempfile in the same directory for ensuring write
|
||||
// operation does not fail.
|
||||
f, err := os.CreateTemp(dir, fmt.Sprintf(".%s.", name))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create temp file failed: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = os.Remove(f.Name()) // Cleanup in case a step failed.
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(f, r)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return xerrors.Errorf("write temp file failed: %w", err)
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("close temp file failed: %w", err)
|
||||
}
|
||||
|
||||
err = os.Rename(f.Name(), path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("rename temp file failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sshConfigExecEscape quotes the string if it contains spaces, as per
|
||||
// `man 5 ssh_config`. However, OpenSSH uses exec in the users shell to
|
||||
// run the command, and as such the formatting/escape requirements
|
||||
|
||||
@@ -138,6 +138,7 @@ func Test_sshConfigSplitOnCoderSection(t *testing.T) {
|
||||
|
||||
// This test tries to mimic the behavior of OpenSSH
|
||||
// when executing e.g. a ProxyCommand.
|
||||
// nolint:tparallel
|
||||
func Test_sshConfigExecEscape(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -154,11 +155,10 @@ func Test_sshConfigExecEscape(t *testing.T) {
|
||||
{"tabs", "path with \ttabs", false},
|
||||
{"newline fails", "path with \nnewline", true},
|
||||
}
|
||||
// nolint:paralleltest // Fixes a flake
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows doesn't typically execute via /bin/sh or cmd.exe, so this test is not applicable.")
|
||||
}
|
||||
|
||||
+4
-8
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -184,7 +183,7 @@ func (r *RootCmd) dotfiles() *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
script := findScript(installScriptSet, files)
|
||||
script := findScript(installScriptSet, dotfilesDir)
|
||||
if script != "" {
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Running install script %s.\n\n Continue?", script),
|
||||
@@ -361,15 +360,12 @@ func dirExists(name string) (bool, error) {
|
||||
}
|
||||
|
||||
// findScript will find the first file that matches the script set.
|
||||
func findScript(scriptSet []string, files []fs.DirEntry) string {
|
||||
func findScript(scriptSet []string, directory string) string {
|
||||
for _, i := range scriptSet {
|
||||
for _, f := range files {
|
||||
if f.Name() == i {
|
||||
return f.Name()
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(directory, i)); err == nil {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
|
||||
@@ -142,6 +142,41 @@ func TestDotfiles(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow\n")
|
||||
})
|
||||
|
||||
t.Run("NestedInstallScript", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("install scripts on windows require sh and aren't very practical")
|
||||
}
|
||||
_, root := clitest.New(t)
|
||||
testRepo := testGitRepo(t, root)
|
||||
|
||||
scriptPath := filepath.Join("script", "setup")
|
||||
err := os.MkdirAll(filepath.Join(testRepo, "script"), 0o750)
|
||||
require.NoError(t, err)
|
||||
// nolint:gosec
|
||||
err = os.WriteFile(filepath.Join(testRepo, scriptPath), []byte("#!/bin/bash\necho wow > "+filepath.Join(string(root), ".bashrc")), 0o750)
|
||||
require.NoError(t, err)
|
||||
|
||||
c := exec.Command("git", "add", scriptPath)
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "commit", "-m", `"add script"`)
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
inv, _ := clitest.New(t, "dotfiles", "--global-config", string(root), "--symlink-dir", string(root), "-y", testRepo)
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := os.ReadFile(filepath.Join(string(root), ".bashrc"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow\n")
|
||||
})
|
||||
|
||||
t.Run("InstallScriptChangeBranch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
|
||||
+42
-8
@@ -117,7 +117,7 @@ func (s *scaletestTracingFlags) provider(ctx context.Context) (trace.TracerProvi
|
||||
}
|
||||
|
||||
var closeTracingOnce sync.Once
|
||||
return tracerProvider, func(ctx context.Context) error {
|
||||
return tracerProvider, func(_ context.Context) error {
|
||||
var err error
|
||||
closeTracingOnce.Do(func() {
|
||||
// Allow time to upload traces even if ctx is canceled
|
||||
@@ -430,7 +430,7 @@ func (r *RootCmd) scaletestCleanup() *serpent.Command {
|
||||
}
|
||||
|
||||
cliui.Infof(inv.Stdout, "Fetching scaletest workspaces...")
|
||||
workspaces, err := getScaletestWorkspaces(ctx, client, template)
|
||||
workspaces, _, err := getScaletestWorkspaces(ctx, client, "", template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -863,6 +863,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
tickInterval time.Duration
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
useHostLogin bool
|
||||
app string
|
||||
template string
|
||||
targetWorkspaces string
|
||||
@@ -926,10 +927,18 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
return xerrors.Errorf("get app host: %w", err)
|
||||
}
|
||||
|
||||
workspaces, err := getScaletestWorkspaces(inv.Context(), client, template)
|
||||
var owner string
|
||||
if useHostLogin {
|
||||
owner = codersdk.Me
|
||||
}
|
||||
|
||||
workspaces, numSkipped, err := getScaletestWorkspaces(inv.Context(), client, owner, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if numSkipped > 0 {
|
||||
cliui.Warnf(inv.Stdout, "CODER_DISABLE_OWNER_WORKSPACE_ACCESS is set on the deployment.\n\t%d workspace(s) were skipped due to ownership mismatch.\n\tSet --use-host-login to only target workspaces you own.", numSkipped)
|
||||
}
|
||||
|
||||
if targetWorkspaceEnd == 0 {
|
||||
targetWorkspaceEnd = len(workspaces)
|
||||
@@ -1092,6 +1101,13 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *serpent.Command {
|
||||
Description: "Send WebSocket traffic to a workspace app (proxied via coderd), cannot be used with --ssh.",
|
||||
Value: serpent.StringOf(&app),
|
||||
},
|
||||
{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Connect as the currently logged in user.",
|
||||
Value: serpent.BoolOf(&useHostLogin),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
@@ -1378,22 +1394,35 @@ func isScaleTestWorkspace(workspace codersdk.Workspace) bool {
|
||||
strings.HasPrefix(workspace.Name, "scaletest-")
|
||||
}
|
||||
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client, template string) ([]codersdk.Workspace, error) {
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client, owner, template string) ([]codersdk.Workspace, int, error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
workspaces []codersdk.Workspace
|
||||
skipped int
|
||||
)
|
||||
|
||||
me, err := client.User(ctx, codersdk.Me)
|
||||
if err != nil {
|
||||
return nil, 0, xerrors.Errorf("check logged-in user")
|
||||
}
|
||||
|
||||
dv, err := client.DeploymentConfig(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, xerrors.Errorf("fetch deployment config: %w", err)
|
||||
}
|
||||
noOwnerAccess := dv.Values != nil && dv.Values.DisableOwnerWorkspaceExec.Value()
|
||||
|
||||
for {
|
||||
page, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: "scaletest-",
|
||||
Template: template,
|
||||
Owner: owner,
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
|
||||
return nil, 0, xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
|
||||
}
|
||||
|
||||
pageNumber++
|
||||
@@ -1403,13 +1432,18 @@ func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client, templa
|
||||
|
||||
pageWorkspaces := make([]codersdk.Workspace, 0, len(page.Workspaces))
|
||||
for _, w := range page.Workspaces {
|
||||
if isScaleTestWorkspace(w) {
|
||||
pageWorkspaces = append(pageWorkspaces, w)
|
||||
if !isScaleTestWorkspace(w) {
|
||||
continue
|
||||
}
|
||||
if noOwnerAccess && w.OwnerID != me.ID {
|
||||
skipped++
|
||||
continue
|
||||
}
|
||||
pageWorkspaces = append(pageWorkspaces, w)
|
||||
}
|
||||
workspaces = append(workspaces, pageWorkspaces...)
|
||||
}
|
||||
return workspaces, nil
|
||||
return workspaces, skipped, nil
|
||||
}
|
||||
|
||||
func getScaletestUsers(ctx context.Context, client *codersdk.Client) ([]codersdk.User, error) {
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
package exptest_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// This test validates that the scaletest CLI filters out workspaces not owned
// when disable owner workspace access is set.
// This test is in its own package because it mutates a global variable that
// can influence other tests in the same package.
// nolint:paralleltest
func TestScaleTestWorkspaceTraffic_UseHostLogin(t *testing.T) {
	ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
	defer cancelFunc()

	// Deployment under test: owner workspace exec is disabled, so the CLI
	// should skip scaletest workspaces the logged-in user does not own.
	log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
	client := coderdtest.New(t, &coderdtest.Options{
		Logger:                   &log,
		IncludeProvisionerDaemon: true,
		DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
			dv.DisableOwnerWorkspaceExec = true
		}),
	})
	owner := coderdtest.CreateFirstUser(t, client)
	tv := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
	_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, tv.ID)
	tpl := coderdtest.CreateTemplate(t, client, owner.OrganizationID, tv.ID)
	// Create a workspace owned by a different user. The "scaletest-" name
	// prefix is what marks it as a scaletest workspace.
	memberClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
	_ = coderdtest.CreateWorkspace(t, memberClient, tpl.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
		cwr.Name = "scaletest-workspace"
	})

	// Test without --use-host-login first.
	inv, root := clitest.New(t, "exp", "scaletest", "workspace-traffic",
		"--template", tpl.Name,
	)
	// nolint:gocritic // We are intentionally testing this as the owner.
	clitest.SetupConfig(t, client, root)
	var stdoutBuf bytes.Buffer
	inv.Stdout = &stdoutBuf

	// The member's workspace is not ours, so it is skipped and the CLI warns
	// about it; with nothing left to target the run errors out.
	err := inv.WithContext(ctx).Run()
	require.ErrorContains(t, err, "no scaletest workspaces exist")
	require.Contains(t, stdoutBuf.String(), `1 workspace(s) were skipped`)

	// Test once again with --use-host-login.
	inv, root = clitest.New(t, "exp", "scaletest", "workspace-traffic",
		"--template", tpl.Name,
		"--use-host-login",
	)
	// nolint:gocritic // We are intentionally testing this as the owner.
	clitest.SetupConfig(t, client, root)
	stdoutBuf.Reset()
	inv.Stdout = &stdoutBuf

	// With --use-host-login only the logged-in user's workspaces are
	// considered, so no skip warning should be printed.
	err = inv.WithContext(ctx).Run()
	require.ErrorContains(t, err, "no scaletest workspaces exist")
	require.NotContains(t, stdoutBuf.String(), `1 workspace(s) were skipped`)
}
|
||||
@@ -81,6 +81,8 @@ var usageTemplate = func() *template.Template {
|
||||
switch v := opt.Value.(type) {
|
||||
case *serpent.Enum:
|
||||
return strings.Join(v.Choices, "|")
|
||||
case *serpent.EnumArray:
|
||||
return fmt.Sprintf("[%s]", strings.Join(v.Choices, "|"))
|
||||
default:
|
||||
return v.Type()
|
||||
}
|
||||
|
||||
@@ -16,6 +16,16 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func createOpts(t *testing.T) *coderdtest.Options {
|
||||
t.Helper()
|
||||
|
||||
dt := coderdtest.DeploymentValues(t)
|
||||
dt.Experiments = []string{string(codersdk.ExperimentNotifications)}
|
||||
return &coderdtest.Options{
|
||||
DeploymentValues: dt,
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotifications(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -42,7 +52,7 @@ func TestNotifications(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, nil)
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, createOpts(t))
|
||||
_ = coderdtest.CreateFirstUser(t, ownerClient)
|
||||
|
||||
// when
|
||||
@@ -72,7 +82,7 @@ func TestPauseNotifications_RegularUser(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, nil)
|
||||
ownerClient, db := coderdtest.NewWithDatabase(t, createOpts(t))
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
anotherClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
@@ -87,7 +97,7 @@ func TestPauseNotifications_RegularUser(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
require.ErrorAsf(t, err, &sdkError, "error should be of type *codersdk.Error")
|
||||
assert.Equal(t, http.StatusForbidden, sdkError.StatusCode())
|
||||
assert.Contains(t, sdkError.Message, "Insufficient permissions to update notifications settings.")
|
||||
assert.Contains(t, sdkError.Message, "Forbidden.")
|
||||
|
||||
// then
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
|
||||
@@ -137,7 +137,7 @@ func (r *RootCmd) assignOrganizationRoles(orgContext *OrganizationContext) *serp
|
||||
|
||||
func (r *RootCmd) listOrganizationMembers(orgContext *OrganizationContext) *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]codersdk.OrganizationMemberWithUserData{}, []string{"username", "organization_roles"}),
|
||||
cliui.TableFormat([]codersdk.OrganizationMemberWithUserData{}, []string{"username", "organization roles"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ func TestListOrganizationMembers(t *testing.T) {
|
||||
client, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.RoleUserAdmin())
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
inv, root := clitest.New(t, "organization", "members", "list", "-c", "user_id,username,roles")
|
||||
inv, root := clitest.New(t, "organization", "members", "list", "-c", "user id,username,organization roles")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
@@ -34,49 +34,3 @@ func TestListOrganizationMembers(t *testing.T) {
|
||||
require.Contains(t, buf.String(), owner.UserID.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveOrganizationMembers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ownerClient := coderdtest.New(t, &coderdtest.Options{})
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
orgAdminClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID))
|
||||
_, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
inv, root := clitest.New(t, "organization", "members", "remove", "-O", owner.OrganizationID.String(), user.Username)
|
||||
clitest.SetupConfig(t, orgAdminClient, root)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
members, err := orgAdminClient.OrganizationMembers(ctx, owner.OrganizationID)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, members, 2)
|
||||
})
|
||||
|
||||
t.Run("UserNotExists", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ownerClient := coderdtest.New(t, &coderdtest.Options{})
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
orgAdminClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID))
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
inv, root := clitest.New(t, "organization", "members", "remove", "-O", owner.OrganizationID.String(), "random_name")
|
||||
clitest.SetupConfig(t, orgAdminClient, root)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "must be an existing uuid or username")
|
||||
})
|
||||
}
|
||||
|
||||
+39
-18
@@ -36,7 +36,7 @@ func (r *RootCmd) organizationRoles(orgContext *OrganizationContext) *serpent.Co
|
||||
func (r *RootCmd) showOrganizationRoles(orgContext *OrganizationContext) *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.ChangeFormatterData(
|
||||
cliui.TableFormat([]roleTableRow{}, []string{"name", "display_name", "site_permissions", "organization_permissions", "user_permissions"}),
|
||||
cliui.TableFormat([]roleTableRow{}, []string{"name", "display name", "site permissions", "organization permissions", "user permissions"}),
|
||||
func(data any) (any, error) {
|
||||
inputs, ok := data.([]codersdk.AssignableRoles)
|
||||
if !ok {
|
||||
@@ -103,7 +103,7 @@ func (r *RootCmd) showOrganizationRoles(orgContext *OrganizationContext) *serpen
|
||||
func (r *RootCmd) editOrganizationRole(orgContext *OrganizationContext) *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.ChangeFormatterData(
|
||||
cliui.TableFormat([]roleTableRow{}, []string{"name", "display_name", "site_permissions", "organization_permissions", "user_permissions"}),
|
||||
cliui.TableFormat([]roleTableRow{}, []string{"name", "display name", "site permissions", "organization permissions", "user permissions"}),
|
||||
func(data any) (any, error) {
|
||||
typed, _ := data.(codersdk.Role)
|
||||
return []roleTableRow{roleToTableView(typed)}, nil
|
||||
@@ -153,6 +153,7 @@ func (r *RootCmd) editOrganizationRole(orgContext *OrganizationContext) *serpent
|
||||
return err
|
||||
}
|
||||
|
||||
createNewRole := true
|
||||
var customRole codersdk.Role
|
||||
if jsonInput {
|
||||
// JSON Upload mode
|
||||
@@ -174,17 +175,30 @@ func (r *RootCmd) editOrganizationRole(orgContext *OrganizationContext) *serpent
|
||||
}
|
||||
return xerrors.Errorf("json input does not appear to be a valid role")
|
||||
}
|
||||
|
||||
existingRoles, err := client.ListOrganizationRoles(ctx, org.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("listing existing roles: %w", err)
|
||||
}
|
||||
for _, existingRole := range existingRoles {
|
||||
if strings.EqualFold(customRole.Name, existingRole.Name) {
|
||||
// Editing an existing role
|
||||
createNewRole = false
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if len(inv.Args) == 0 {
|
||||
return xerrors.Errorf("missing role name argument, usage: \"coder organizations roles edit <role_name>\"")
|
||||
}
|
||||
|
||||
interactiveRole, err := interactiveOrgRoleEdit(inv, org.ID, client)
|
||||
interactiveRole, newRole, err := interactiveOrgRoleEdit(inv, org.ID, client)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("editing role: %w", err)
|
||||
}
|
||||
|
||||
customRole = *interactiveRole
|
||||
createNewRole = newRole
|
||||
|
||||
preview := fmt.Sprintf("permissions: %d site, %d org, %d user",
|
||||
len(customRole.SitePermissions), len(customRole.OrganizationPermissions), len(customRole.UserPermissions))
|
||||
@@ -203,7 +217,12 @@ func (r *RootCmd) editOrganizationRole(orgContext *OrganizationContext) *serpent
|
||||
// Do not actually post
|
||||
updated = customRole
|
||||
} else {
|
||||
updated, err = client.PatchOrganizationRole(ctx, customRole)
|
||||
switch createNewRole {
|
||||
case true:
|
||||
updated, err = client.CreateOrganizationRole(ctx, customRole)
|
||||
default:
|
||||
updated, err = client.UpdateOrganizationRole(ctx, customRole)
|
||||
}
|
||||
if err != nil {
|
||||
return xerrors.Errorf("patch role: %w", err)
|
||||
}
|
||||
@@ -223,11 +242,12 @@ func (r *RootCmd) editOrganizationRole(orgContext *OrganizationContext) *serpent
|
||||
return cmd
|
||||
}
|
||||
|
||||
func interactiveOrgRoleEdit(inv *serpent.Invocation, orgID uuid.UUID, client *codersdk.Client) (*codersdk.Role, error) {
|
||||
func interactiveOrgRoleEdit(inv *serpent.Invocation, orgID uuid.UUID, client *codersdk.Client) (*codersdk.Role, bool, error) {
|
||||
newRole := false
|
||||
ctx := inv.Context()
|
||||
roles, err := client.ListOrganizationRoles(ctx, orgID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("listing roles: %w", err)
|
||||
return nil, newRole, xerrors.Errorf("listing roles: %w", err)
|
||||
}
|
||||
|
||||
// Make sure the role actually exists first
|
||||
@@ -246,22 +266,23 @@ func interactiveOrgRoleEdit(inv *serpent.Invocation, orgID uuid.UUID, client *co
|
||||
IsConfirm: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("abort: %w", err)
|
||||
return nil, newRole, xerrors.Errorf("abort: %w", err)
|
||||
}
|
||||
|
||||
originalRole.Role = codersdk.Role{
|
||||
Name: inv.Args[0],
|
||||
OrganizationID: orgID.String(),
|
||||
}
|
||||
newRole = true
|
||||
}
|
||||
|
||||
// Some checks since interactive mode is limited in what it currently sees
|
||||
if len(originalRole.SitePermissions) > 0 {
|
||||
return nil, xerrors.Errorf("unable to edit role in interactive mode, it contains site wide permissions")
|
||||
return nil, newRole, xerrors.Errorf("unable to edit role in interactive mode, it contains site wide permissions")
|
||||
}
|
||||
|
||||
if len(originalRole.UserPermissions) > 0 {
|
||||
return nil, xerrors.Errorf("unable to edit role in interactive mode, it contains user permissions")
|
||||
return nil, newRole, xerrors.Errorf("unable to edit role in interactive mode, it contains user permissions")
|
||||
}
|
||||
|
||||
role := &originalRole.Role
|
||||
@@ -283,13 +304,13 @@ customRoleLoop:
|
||||
Options: append(permissionPreviews(role, allowedResources), done, abort),
|
||||
})
|
||||
if err != nil {
|
||||
return role, xerrors.Errorf("selecting resource: %w", err)
|
||||
return role, newRole, xerrors.Errorf("selecting resource: %w", err)
|
||||
}
|
||||
switch selected {
|
||||
case done:
|
||||
break customRoleLoop
|
||||
case abort:
|
||||
return role, xerrors.Errorf("edit role %q aborted", role.Name)
|
||||
return role, newRole, xerrors.Errorf("edit role %q aborted", role.Name)
|
||||
default:
|
||||
strs := strings.Split(selected, "::")
|
||||
resource := strings.TrimSpace(strs[0])
|
||||
@@ -300,7 +321,7 @@ customRoleLoop:
|
||||
Defaults: defaultActions(role, resource),
|
||||
})
|
||||
if err != nil {
|
||||
return role, xerrors.Errorf("selecting actions for resource %q: %w", resource, err)
|
||||
return role, newRole, xerrors.Errorf("selecting actions for resource %q: %w", resource, err)
|
||||
}
|
||||
applyOrgResourceActions(role, resource, actions)
|
||||
// back to resources!
|
||||
@@ -309,7 +330,7 @@ customRoleLoop:
|
||||
// This println is required because the prompt ends us on the same line as some text.
|
||||
_, _ = fmt.Println()
|
||||
|
||||
return role, nil
|
||||
return role, newRole, nil
|
||||
}
|
||||
|
||||
func applyOrgResourceActions(role *codersdk.Role, resource string, actions []string) {
|
||||
@@ -387,10 +408,10 @@ func roleToTableView(role codersdk.Role) roleTableRow {
|
||||
|
||||
type roleTableRow struct {
|
||||
Name string `table:"name,default_sort"`
|
||||
DisplayName string `table:"display_name"`
|
||||
OrganizationID string `table:"organization_id"`
|
||||
SitePermissions string ` table:"site_permissions"`
|
||||
DisplayName string `table:"display name"`
|
||||
OrganizationID string `table:"organization id"`
|
||||
SitePermissions string ` table:"site permissions"`
|
||||
// map[<org_id>] -> Permissions
|
||||
OrganizationPermissions string `table:"organization_permissions"`
|
||||
UserPermissions string `table:"user_permissions"`
|
||||
OrganizationPermissions string `table:"organization permissions"`
|
||||
UserPermissions string `table:"user permissions"`
|
||||
}
|
||||
|
||||
+75
-4
@@ -2,10 +2,14 @@ package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
@@ -13,7 +17,9 @@ import (
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/cliutil"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/healthsdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
@@ -61,7 +67,8 @@ func (r *RootCmd) ping() *serpent.Command {
|
||||
if !r.disableNetworkTelemetry {
|
||||
opts.EnableTelemetry = true
|
||||
}
|
||||
conn, err := workspacesdk.New(client).DialAgent(ctx, workspaceAgent.ID, opts)
|
||||
client := workspacesdk.New(client)
|
||||
conn, err := client.DialAgent(ctx, workspaceAgent.ID, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -138,11 +145,56 @@ func (r *RootCmd) ping() *serpent.Command {
|
||||
)
|
||||
|
||||
if n == int(pingNum) {
|
||||
diags := conn.GetPeerDiagnostics()
|
||||
cliui.PeerDiagnostics(inv.Stdout, diags)
|
||||
return nil
|
||||
break
|
||||
}
|
||||
}
|
||||
diagCtx, diagCancel := context.WithTimeout(inv.Context(), 30*time.Second)
|
||||
defer diagCancel()
|
||||
diags := conn.GetPeerDiagnostics()
|
||||
cliui.PeerDiagnostics(inv.Stdout, diags)
|
||||
|
||||
ni := conn.GetNetInfo()
|
||||
connDiags := cliui.ConnDiags{
|
||||
PingP2P: didP2p,
|
||||
DisableDirect: r.disableDirect,
|
||||
LocalNetInfo: ni,
|
||||
Verbose: r.verbose,
|
||||
}
|
||||
|
||||
awsRanges, err := cliutil.FetchAWSIPRanges(diagCtx, cliutil.AWSIPRangesURL)
|
||||
if err != nil {
|
||||
opts.Logger.Debug(inv.Context(), "failed to retrieve AWS IP ranges", slog.Error(err))
|
||||
}
|
||||
|
||||
connDiags.ClientIPIsAWS = isAWSIP(awsRanges, ni)
|
||||
|
||||
connInfo, err := client.AgentConnectionInfoGeneric(diagCtx)
|
||||
if err != nil || connInfo.DERPMap == nil {
|
||||
return xerrors.Errorf("Failed to retrieve connection info from server: %w\n", err)
|
||||
}
|
||||
connDiags.ConnInfo = connInfo
|
||||
ifReport, err := healthsdk.RunInterfacesReport()
|
||||
if err == nil {
|
||||
connDiags.LocalInterfaces = &ifReport
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Failed to retrieve local interfaces report: %v\n", err)
|
||||
}
|
||||
|
||||
agentNetcheck, err := conn.Netcheck(diagCtx)
|
||||
if err == nil {
|
||||
connDiags.AgentNetcheck = &agentNetcheck
|
||||
connDiags.AgentIPIsAWS = isAWSIP(awsRanges, agentNetcheck.NetInfo)
|
||||
} else {
|
||||
var sdkErr *codersdk.Error
|
||||
if errors.As(err, &sdkErr) && sdkErr.StatusCode() == http.StatusNotFound {
|
||||
_, _ = fmt.Fprint(inv.Stdout, "Could not generate full connection report as the workspace agent is outdated\n")
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Failed to retrieve connection report from agent: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
connDiags.Write(inv.Stdout)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -170,3 +222,22 @@ func (r *RootCmd) ping() *serpent.Command {
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func isAWSIP(awsRanges *cliutil.AWSIPRanges, ni *tailcfg.NetInfo) bool {
|
||||
if awsRanges == nil {
|
||||
return false
|
||||
}
|
||||
if ni.GlobalV4 != "" {
|
||||
ip, err := netip.ParseAddr(ni.GlobalV4)
|
||||
if err == nil && awsRanges.CheckIP(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if ni.GlobalV6 != "" {
|
||||
ip, err := netip.ParseAddr(ni.GlobalV6)
|
||||
if err == nil && awsRanges.CheckIP(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -67,6 +67,7 @@ func TestPing(t *testing.T) {
|
||||
|
||||
pty.ExpectMatch("pong from " + workspace.Name)
|
||||
pty.ExpectMatch("✔ received remote agent data from Coder networking coordinator")
|
||||
pty.ExpectMatch("✔ You are connected directly (p2p)")
|
||||
cancel()
|
||||
<-cmdDone
|
||||
})
|
||||
|
||||
+1
-1
@@ -31,7 +31,7 @@ func (r *RootCmd) rename() *serpent.Command {
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "%s\n\n",
|
||||
pretty.Sprint(cliui.DefaultStyles.Wrap, "WARNING: A rename can result in data loss if a resource references the workspace name in the template (e.g volumes). Please backup any data before proceeding."),
|
||||
)
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "See: %s\n\n", "https://coder.com/docs/coder-oss/latest/templates/resource-persistence#%EF%B8%8F-persistence-pitfalls")
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "See: %s\n\n", "https://coder.com/docs/templates/resource-persistence#%EF%B8%8F-persistence-pitfalls")
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Type %q to confirm rename:", workspace.Name),
|
||||
Validate: func(s string) error {
|
||||
|
||||
+45
-38
@@ -82,6 +82,7 @@ const (
|
||||
func (r *RootCmd) CoreSubcommands() []*serpent.Command {
|
||||
// Please re-sort this list alphabetically if you change it!
|
||||
return []*serpent.Command{
|
||||
r.completion(),
|
||||
r.dotfiles(),
|
||||
r.externalAuth(),
|
||||
r.login(),
|
||||
@@ -549,44 +550,7 @@ func (r *RootCmd) InitClient(client *codersdk.Client) serpent.MiddlewareFunc {
|
||||
// HeaderTransport creates a new transport that executes `--header-command`
|
||||
// if it is set to add headers for all outbound requests.
|
||||
func (r *RootCmd) HeaderTransport(ctx context.Context, serverURL *url.URL) (*codersdk.HeaderTransport, error) {
|
||||
transport := &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: http.Header{},
|
||||
}
|
||||
headers := r.header
|
||||
if r.headerCommand != "" {
|
||||
shell := "sh"
|
||||
caller := "-c"
|
||||
if runtime.GOOS == "windows" {
|
||||
shell = "cmd.exe"
|
||||
caller = "/c"
|
||||
}
|
||||
var outBuf bytes.Buffer
|
||||
// #nosec
|
||||
cmd := exec.CommandContext(ctx, shell, caller, r.headerCommand)
|
||||
cmd.Env = append(os.Environ(), "CODER_URL="+serverURL.String())
|
||||
cmd.Stdout = &outBuf
|
||||
cmd.Stderr = io.Discard
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to run %v: %w", cmd.Args, err)
|
||||
}
|
||||
scanner := bufio.NewScanner(&outBuf)
|
||||
for scanner.Scan() {
|
||||
headers = append(headers, scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, xerrors.Errorf("scan %v: %w", cmd.Args, err)
|
||||
}
|
||||
}
|
||||
for _, header := range headers {
|
||||
parts := strings.SplitN(header, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, xerrors.Errorf("split header %q had less than two parts", header)
|
||||
}
|
||||
transport.Header.Add(parts[0], parts[1])
|
||||
}
|
||||
return transport, nil
|
||||
return headerTransport(ctx, serverURL, r.header, r.headerCommand)
|
||||
}
|
||||
|
||||
func (r *RootCmd) configureClient(ctx context.Context, client *codersdk.Client, serverURL *url.URL, inv *serpent.Invocation) error {
|
||||
@@ -1272,3 +1236,46 @@ type roundTripper func(req *http.Request) (*http.Response, error)
|
||||
func (r roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
return r(req)
|
||||
}
|
||||
|
||||
// HeaderTransport creates a new transport that executes `--header-command`
|
||||
// if it is set to add headers for all outbound requests.
|
||||
func headerTransport(ctx context.Context, serverURL *url.URL, header []string, headerCommand string) (*codersdk.HeaderTransport, error) {
|
||||
transport := &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: http.Header{},
|
||||
}
|
||||
headers := header
|
||||
if headerCommand != "" {
|
||||
shell := "sh"
|
||||
caller := "-c"
|
||||
if runtime.GOOS == "windows" {
|
||||
shell = "cmd.exe"
|
||||
caller = "/c"
|
||||
}
|
||||
var outBuf bytes.Buffer
|
||||
// #nosec
|
||||
cmd := exec.CommandContext(ctx, shell, caller, headerCommand)
|
||||
cmd.Env = append(os.Environ(), "CODER_URL="+serverURL.String())
|
||||
cmd.Stdout = &outBuf
|
||||
cmd.Stderr = io.Discard
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to run %v: %w", cmd.Args, err)
|
||||
}
|
||||
scanner := bufio.NewScanner(&outBuf)
|
||||
for scanner.Scan() {
|
||||
headers = append(headers, scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, xerrors.Errorf("scan %v: %w", cmd.Args, err)
|
||||
}
|
||||
}
|
||||
for _, header := range headers {
|
||||
parts := strings.SplitN(header, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, xerrors.Errorf("split header %q had less than two parts", header)
|
||||
}
|
||||
transport.Header.Add(parts[0], parts[1])
|
||||
}
|
||||
return transport, nil
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ func setupTestSchedule(t *testing.T, sched *cron.Schedule) (ownerClient, memberC
|
||||
|
||||
ownerClient, db = coderdtest.NewWithDatabase(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUserMutators(t, ownerClient, owner.OrganizationID, nil, func(r *codersdk.CreateUserRequest) {
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUserMutators(t, ownerClient, owner.OrganizationID, nil, func(r *codersdk.CreateUserRequestWithOrgs) {
|
||||
r.Username = "testuser2" // ensure deterministic ordering
|
||||
})
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
|
||||
+22
-9
@@ -55,7 +55,9 @@ import (
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"github.com/coder/coder/v2/coderd/entitlements"
|
||||
"github.com/coder/pretty"
|
||||
"github.com/coder/quartz"
|
||||
"github.com/coder/retry"
|
||||
"github.com/coder/serpent"
|
||||
"github.com/coder/wgtunnel/tunnelsdk"
|
||||
@@ -604,6 +606,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
SSHConfigOptions: configSSHOptions,
|
||||
},
|
||||
AllowWorkspaceRenames: vals.AllowWorkspaceRenames.Value(),
|
||||
Entitlements: entitlements.New(),
|
||||
NotificationsEnqueuer: notifications.NewNoopEnqueuer(), // Changed further down if notifications enabled.
|
||||
}
|
||||
if httpServers.TLSConfig != nil {
|
||||
@@ -631,7 +634,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
"new version of coder available",
|
||||
slog.F("new_version", r.Version),
|
||||
slog.F("url", r.URL),
|
||||
slog.F("upgrade_instructions", "https://coder.com/docs/coder-oss/latest/admin/upgrade"),
|
||||
slog.F("upgrade_instructions", "https://coder.com/docs/admin/upgrade"),
|
||||
)
|
||||
}
|
||||
},
|
||||
@@ -791,18 +794,26 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
}
|
||||
}
|
||||
|
||||
keyBytes, err := hex.DecodeString(oauthSigningKeyStr)
|
||||
oauthKeyBytes, err := hex.DecodeString(oauthSigningKeyStr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decode oauth signing key from database: %w", err)
|
||||
}
|
||||
if len(keyBytes) != len(options.OAuthSigningKey) {
|
||||
return xerrors.Errorf("oauth signing key in database is not the correct length, expect %d got %d", len(options.OAuthSigningKey), len(keyBytes))
|
||||
if len(oauthKeyBytes) != len(options.OAuthSigningKey) {
|
||||
return xerrors.Errorf("oauth signing key in database is not the correct length, expect %d got %d", len(options.OAuthSigningKey), len(oauthKeyBytes))
|
||||
}
|
||||
copy(options.OAuthSigningKey[:], keyBytes)
|
||||
copy(options.OAuthSigningKey[:], oauthKeyBytes)
|
||||
if options.OAuthSigningKey == [32]byte{} {
|
||||
return xerrors.Errorf("oauth signing key in database is empty")
|
||||
}
|
||||
|
||||
// Read the coordinator resume token signing key from the
|
||||
// database.
|
||||
resumeTokenKey, err := tailnet.ResumeTokenSigningKeyFromDatabase(ctx, tx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get coordinator resume token key from database: %w", err)
|
||||
}
|
||||
options.CoordinatorResumeTokenProvider = tailnet.NewResumeTokenKeyProvider(resumeTokenKey, quartz.NewReal(), tailnet.DefaultResumeTokenExpiry)
|
||||
|
||||
return nil
|
||||
}, nil)
|
||||
if err != nil {
|
||||
@@ -976,7 +987,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
defer shutdownConns()
|
||||
|
||||
// Ensures that old database entries are cleaned up over time!
|
||||
purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database)
|
||||
purger := dbpurge.New(ctx, logger.Named("dbpurge"), options.Database, quartz.NewReal())
|
||||
defer purger.Close()
|
||||
|
||||
// Updates workspace usage
|
||||
@@ -993,9 +1004,10 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
if experiments.Enabled(codersdk.ExperimentNotifications) {
|
||||
cfg := options.DeploymentValues.Notifications
|
||||
metrics := notifications.NewMetrics(options.PrometheusRegistry)
|
||||
helpers := templateHelpers(options)
|
||||
|
||||
// The enqueuer is responsible for enqueueing notifications to the given store.
|
||||
enqueuer, err := notifications.NewStoreEnqueuer(cfg, options.Database, templateHelpers(options), logger.Named("notifications.enqueuer"))
|
||||
enqueuer, err := notifications.NewStoreEnqueuer(cfg, options.Database, helpers, logger.Named("notifications.enqueuer"), quartz.NewReal())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to instantiate notification store enqueuer: %w", err)
|
||||
}
|
||||
@@ -1004,7 +1016,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
// The notification manager is responsible for:
|
||||
// - creating notifiers and managing their lifecycles (notifiers are responsible for dequeueing/sending notifications)
|
||||
// - keeping the store updated with status updates
|
||||
notificationsManager, err = notifications.NewManager(cfg, options.Database, metrics, logger.Named("notifications.manager"))
|
||||
notificationsManager, err = notifications.NewManager(cfg, options.Database, helpers, metrics, logger.Named("notifications.manager"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to instantiate notification manager: %w", err)
|
||||
}
|
||||
@@ -1291,7 +1303,8 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
// We can later use this to inject whitelabel fields when app name / logo URL are overridden.
|
||||
func templateHelpers(options *coderd.Options) map[string]any {
|
||||
return map[string]any{
|
||||
"base_url": func() string { return options.AccessURL.String() },
|
||||
"base_url": func() string { return options.AccessURL.String() },
|
||||
"current_year": func() string { return strconv.Itoa(time.Now().Year()) },
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -83,12 +83,12 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command {
|
||||
|
||||
validateInputs := func(username, email, password string) error {
|
||||
// Use the validator tags so we match the API's validation.
|
||||
req := codersdk.CreateUserRequest{
|
||||
Username: "username",
|
||||
Name: "Admin User",
|
||||
Email: "email@coder.com",
|
||||
Password: "ValidPa$$word123!",
|
||||
OrganizationID: uuid.New(),
|
||||
req := codersdk.CreateUserRequestWithOrgs{
|
||||
Username: "username",
|
||||
Name: "Admin User",
|
||||
Email: "email@coder.com",
|
||||
Password: "ValidPa$$word123!",
|
||||
OrganizationIDs: []uuid.UUID{uuid.New()},
|
||||
}
|
||||
if username != "" {
|
||||
req.Username = username
|
||||
@@ -176,7 +176,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command {
|
||||
// Create the user.
|
||||
var newUser database.User
|
||||
err = db.InTx(func(tx database.Store) error {
|
||||
orgs, err := tx.GetOrganizations(ctx)
|
||||
orgs, err := tx.GetOrganizations(ctx, database.GetOrganizationsParams{})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get organizations: %w", err)
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func TestServerCreateAdminUser(t *testing.T) {
|
||||
require.EqualValues(t, []string{codersdk.RoleOwner}, user.RBACRoles, "user does not have owner role")
|
||||
|
||||
// Check that user is admin in every org.
|
||||
orgs, err := db.GetOrganizations(ctx)
|
||||
orgs, err := db.GetOrganizations(ctx, database.GetOrganizationsParams{})
|
||||
require.NoError(t, err)
|
||||
orgIDs := make(map[uuid.UUID]struct{}, len(orgs))
|
||||
for _, org := range orgs {
|
||||
|
||||
+9
-1
@@ -45,10 +45,12 @@ import (
|
||||
"github.com/coder/coder/v2/cli/config"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/telemetry"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/tailnet/tailnettest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
@@ -1832,6 +1834,12 @@ func TestServer_InvalidDERP(t *testing.T) {
|
||||
func TestServer_DisabledDERP(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
derpMap, _ := tailnettest.RunDERPAndSTUN(t)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(context.Background(), w, http.StatusOK, derpMap)
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancelFunc()
|
||||
|
||||
@@ -1843,7 +1851,7 @@ func TestServer_DisabledDERP(t *testing.T) {
|
||||
"--http-address", ":0",
|
||||
"--access-url", "http://example.com",
|
||||
"--derp-server-enable=false",
|
||||
"--derp-config-url", "https://controlplane.tailscale.com/derpmap/default",
|
||||
"--derp-config-url", srv.URL,
|
||||
)
|
||||
clitest.Start(t, inv.WithContext(ctx))
|
||||
accessURL := waitAccessURL(t, cfg)
|
||||
|
||||
+10
-10
@@ -32,11 +32,11 @@ func (r *RootCmd) stat() *serpent.Command {
|
||||
fs = afero.NewReadOnlyFs(afero.NewOsFs())
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]statsRow{}, []string{
|
||||
"host_cpu",
|
||||
"host_memory",
|
||||
"home_disk",
|
||||
"container_cpu",
|
||||
"container_memory",
|
||||
"host cpu",
|
||||
"host memory",
|
||||
"home disk",
|
||||
"container cpu",
|
||||
"container memory",
|
||||
}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
@@ -284,9 +284,9 @@ func (*RootCmd) statDisk(fs afero.Fs) *serpent.Command {
|
||||
}
|
||||
|
||||
type statsRow struct {
|
||||
HostCPU *clistat.Result `json:"host_cpu" table:"host_cpu,default_sort"`
|
||||
HostMemory *clistat.Result `json:"host_memory" table:"host_memory"`
|
||||
Disk *clistat.Result `json:"home_disk" table:"home_disk"`
|
||||
ContainerCPU *clistat.Result `json:"container_cpu" table:"container_cpu"`
|
||||
ContainerMemory *clistat.Result `json:"container_memory" table:"container_memory"`
|
||||
HostCPU *clistat.Result `json:"host_cpu" table:"host cpu,default_sort"`
|
||||
HostMemory *clistat.Result `json:"host_memory" table:"host memory"`
|
||||
Disk *clistat.Result `json:"home_disk" table:"home disk"`
|
||||
ContainerCPU *clistat.Result `json:"container_cpu" table:"container cpu"`
|
||||
ContainerMemory *clistat.Result `json:"container_memory" table:"container memory"`
|
||||
}
|
||||
|
||||
+35
-9
@@ -184,16 +184,8 @@ func (r *RootCmd) supportBundle() *serpent.Command {
|
||||
_ = os.Remove(outputPath) // best effort
|
||||
return xerrors.Errorf("create support bundle: %w", err)
|
||||
}
|
||||
docsURL := bun.Deployment.Config.Values.DocsURL.String()
|
||||
deployHealthSummary := bun.Deployment.HealthReport.Summarize(docsURL)
|
||||
if len(deployHealthSummary) > 0 {
|
||||
cliui.Warn(inv.Stdout, "Deployment health issues detected:", deployHealthSummary...)
|
||||
}
|
||||
clientNetcheckSummary := bun.Network.Netcheck.Summarize("Client netcheck:", docsURL)
|
||||
if len(clientNetcheckSummary) > 0 {
|
||||
cliui.Warn(inv.Stdout, "Networking issues detected:", deployHealthSummary...)
|
||||
}
|
||||
|
||||
summarizeBundle(inv, bun)
|
||||
bun.CLILogs = cliLogBuf.Bytes()
|
||||
|
||||
if err := writeBundle(bun, zwr); err != nil {
|
||||
@@ -225,6 +217,40 @@ func (r *RootCmd) supportBundle() *serpent.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
// summarizeBundle makes a best-effort attempt to write a short summary
|
||||
// of the support bundle to the user's terminal.
|
||||
func summarizeBundle(inv *serpent.Invocation, bun *support.Bundle) {
|
||||
if bun == nil {
|
||||
cliui.Error(inv.Stdout, "No support bundle generated!")
|
||||
return
|
||||
}
|
||||
|
||||
if bun.Deployment.Config == nil {
|
||||
cliui.Error(inv.Stdout, "No deployment configuration available!")
|
||||
return
|
||||
}
|
||||
|
||||
docsURL := bun.Deployment.Config.Values.DocsURL.String()
|
||||
if bun.Deployment.HealthReport == nil {
|
||||
cliui.Error(inv.Stdout, "No deployment health report available!")
|
||||
return
|
||||
}
|
||||
deployHealthSummary := bun.Deployment.HealthReport.Summarize(docsURL)
|
||||
if len(deployHealthSummary) > 0 {
|
||||
cliui.Warn(inv.Stdout, "Deployment health issues detected:", deployHealthSummary...)
|
||||
}
|
||||
|
||||
if bun.Network.Netcheck == nil {
|
||||
cliui.Error(inv.Stdout, "No network troubleshooting information available!")
|
||||
return
|
||||
}
|
||||
|
||||
clientNetcheckSummary := bun.Network.Netcheck.Summarize("Client netcheck:", docsURL)
|
||||
if len(clientNetcheckSummary) > 0 {
|
||||
cliui.Warn(inv.Stdout, "Networking issues detected:", deployHealthSummary...)
|
||||
}
|
||||
}
|
||||
|
||||
func findAgent(agentName string, haystack []codersdk.WorkspaceResource) (*codersdk.WorkspaceAgent, bool) {
|
||||
for _, res := range haystack {
|
||||
for _, agt := range res.Agents {
|
||||
|
||||
@@ -5,6 +5,9 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@@ -14,6 +17,7 @@ import (
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent"
|
||||
@@ -156,6 +160,53 @@ func TestSupportBundle(t *testing.T) {
|
||||
err := inv.Run()
|
||||
require.ErrorContains(t, err, "failed authorization check")
|
||||
})
|
||||
|
||||
// This ensures that the CLI does not panic when trying to generate a support bundle
|
||||
// against a fake server that returns an empty response for all requests. This essentially
|
||||
// ensures that (almost) all of the support bundle generating code paths get a zero value.
|
||||
t.Run("DontPanic", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, code := range []int{
|
||||
http.StatusOK,
|
||||
http.StatusUnauthorized,
|
||||
http.StatusForbidden,
|
||||
http.StatusNotFound,
|
||||
http.StatusInternalServerError,
|
||||
} {
|
||||
t.Run(http.StatusText(code), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Start up a fake server
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Logf("received request: %s %s", r.Method, r.URL)
|
||||
switch r.URL.Path {
|
||||
case "/api/v2/authcheck":
|
||||
// Fake auth check
|
||||
resp := codersdk.AuthorizationResponse{
|
||||
"Read DeploymentValues": true,
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
assert.NoError(t, json.NewEncoder(w).Encode(resp))
|
||||
default:
|
||||
// Simply return a blank response for everything else.
|
||||
w.WriteHeader(code)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
u, err := url.Parse(srv.URL)
|
||||
require.NoError(t, err)
|
||||
client := codersdk.New(u)
|
||||
|
||||
d := t.TempDir()
|
||||
path := filepath.Join(d, "bundle.zip")
|
||||
|
||||
inv, root := clitest.New(t, "support", "bundle", "--url-override", srv.URL, "--output-file", path, "--yes")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// nolint:revive // It's a control flag, but this is just a test.
|
||||
|
||||
+3
-25
@@ -3,7 +3,6 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@@ -239,35 +238,14 @@ func (r *RootCmd) templateEdit() *serpent.Command {
|
||||
Value: serpent.DurationOf(&activityBump),
|
||||
},
|
||||
{
|
||||
Flag: "autostart-requirement-weekdays",
|
||||
// workspaces created from this template must be restarted on the given weekdays. To unset this value for the template (and disable the autostop requirement for the template), pass 'none'.
|
||||
Flag: "autostart-requirement-weekdays",
|
||||
Description: "Edit the template autostart requirement weekdays - workspaces created from this template can only autostart on the given weekdays. To unset this value for the template (and allow autostart on all days), pass 'all'.",
|
||||
Value: serpent.Validate(serpent.StringArrayOf(&autostartRequirementDaysOfWeek), func(value *serpent.StringArray) error {
|
||||
v := value.GetSlice()
|
||||
if len(v) == 1 && v[0] == "all" {
|
||||
return nil
|
||||
}
|
||||
_, err := codersdk.WeekdaysToBitmap(v)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("invalid autostart requirement days of week %q: %w", strings.Join(v, ","), err)
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
Value: serpent.EnumArrayOf(&autostartRequirementDaysOfWeek, append(codersdk.AllDaysOfWeek, "all")...),
|
||||
},
|
||||
{
|
||||
Flag: "autostop-requirement-weekdays",
|
||||
Description: "Edit the template autostop requirement weekdays - workspaces created from this template must be restarted on the given weekdays. To unset this value for the template (and disable the autostop requirement for the template), pass 'none'.",
|
||||
Value: serpent.Validate(serpent.StringArrayOf(&autostopRequirementDaysOfWeek), func(value *serpent.StringArray) error {
|
||||
v := value.GetSlice()
|
||||
if len(v) == 1 && v[0] == "none" {
|
||||
return nil
|
||||
}
|
||||
_, err := codersdk.WeekdaysToBitmap(v)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("invalid autostop requirement days of week %q: %w", strings.Join(v, ","), err)
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
Value: serpent.EnumArrayOf(&autostopRequirementDaysOfWeek, append(codersdk.AllDaysOfWeek, "none")...),
|
||||
},
|
||||
{
|
||||
Flag: "autostop-requirement-weeks",
|
||||
|
||||
@@ -40,11 +40,11 @@ func (r *RootCmd) templateVersions() *serpent.Command {
|
||||
|
||||
func (r *RootCmd) templateVersionsList() *serpent.Command {
|
||||
defaultColumns := []string{
|
||||
"Name",
|
||||
"Created At",
|
||||
"Created By",
|
||||
"Status",
|
||||
"Active",
|
||||
"name",
|
||||
"created at",
|
||||
"created by",
|
||||
"status",
|
||||
"active",
|
||||
}
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]templateVersionRow{}, defaultColumns),
|
||||
@@ -70,10 +70,10 @@ func (r *RootCmd) templateVersionsList() *serpent.Command {
|
||||
for _, opt := range i.Command.Options {
|
||||
if opt.Flag == "column" {
|
||||
if opt.ValueSource == serpent.ValueSourceDefault {
|
||||
v, ok := opt.Value.(*serpent.StringArray)
|
||||
v, ok := opt.Value.(*serpent.EnumArray)
|
||||
if ok {
|
||||
// Add the extra new default column.
|
||||
*v = append(*v, "Archived")
|
||||
_ = v.Append("Archived")
|
||||
}
|
||||
}
|
||||
break
|
||||
|
||||
Vendored
+2
@@ -15,6 +15,8 @@ USAGE:
|
||||
|
||||
SUBCOMMANDS:
|
||||
autoupdate Toggle auto-update policy for a workspace
|
||||
completion Install or update shell completion scripts for the
|
||||
detected or chosen shell.
|
||||
config-ssh Add an SSH Host entry for your workspaces "ssh
|
||||
coder.workspace"
|
||||
create Create a workspace
|
||||
|
||||
+9
@@ -15,6 +15,15 @@ OPTIONS:
|
||||
--log-stackdriver string, $CODER_AGENT_LOGGING_STACKDRIVER
|
||||
Output Stackdriver compatible logs to a given file.
|
||||
|
||||
--agent-header string-array, $CODER_AGENT_HEADER
|
||||
Additional HTTP headers added to all requests. Provide as key=value.
|
||||
Can be specified multiple times.
|
||||
|
||||
--agent-header-command string, $CODER_AGENT_HEADER_COMMAND
|
||||
An external command that outputs additional HTTP headers added to all
|
||||
requests. The command must output each header as `key=value` on its
|
||||
own line.
|
||||
|
||||
--auth string, $CODER_AGENT_AUTH (default: token)
|
||||
Specify the authentication type to use for the agent.
|
||||
|
||||
|
||||
+16
@@ -0,0 +1,16 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder completion [flags]
|
||||
|
||||
Install or update shell completion scripts for the detected or chosen shell.
|
||||
|
||||
OPTIONS:
|
||||
-p, --print bool
|
||||
Print the completion script instead of installing it.
|
||||
|
||||
-s, --shell bash|fish|zsh|powershell
|
||||
The shell to install completion for.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
+4
-7
@@ -11,14 +11,11 @@ OPTIONS:
|
||||
-a, --all bool
|
||||
Specifies whether all workspaces will be listed or not.
|
||||
|
||||
-c, --column string-array (default: workspace,template,status,healthy,last built,current version,outdated,starts at,stops after)
|
||||
Columns to display in table output. Available columns: favorite,
|
||||
workspace, organization id, organization name, template, status,
|
||||
healthy, last built, current version, outdated, starts at, starts
|
||||
next, stops after, stops next, daily cost.
|
||||
-c, --column [favorite|workspace|organization id|organization name|template|status|healthy|last built|current version|outdated|starts at|starts next|stops after|stops next|daily cost] (default: workspace,template,status,healthy,last built,current version,outdated,starts at,stops after)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
--search string (default: owner:me)
|
||||
Search for a workspace with a query.
|
||||
|
||||
+4
-5
@@ -15,12 +15,11 @@ OPTIONS:
|
||||
-a, --all bool
|
||||
Specifies whether all workspaces will be listed or not.
|
||||
|
||||
-c, --column string-array (default: workspace,starts at,starts next,stops after,stops next)
|
||||
Columns to display in table output. Available columns: workspace,
|
||||
starts at, starts next, stops after, stops next.
|
||||
-c, --column [workspace|starts at|starts next|stops after|stops next] (default: workspace,starts at,starts next,stops after,stops next)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
--search string (default: owner:me)
|
||||
Search for a workspace with a query.
|
||||
|
||||
+15
-1
@@ -22,7 +22,8 @@ OPTIONS:
|
||||
--cache-dir string, $CODER_CACHE_DIRECTORY (default: [cache dir])
|
||||
The directory to cache temporary files. If unspecified and
|
||||
$CACHE_DIRECTORY is set, it will be used for compatibility with
|
||||
systemd.
|
||||
systemd. This directory is NOT safe to be configured as a shared
|
||||
directory across coderd/provisionerd replicas.
|
||||
|
||||
--disable-owner-workspace-access bool, $CODER_DISABLE_OWNER_WORKSPACE_ACCESS
|
||||
Remove the permission for the 'owner' role to have workspace execution
|
||||
@@ -432,6 +433,11 @@ OIDC OPTIONS:
|
||||
groups. This filter is applied after the group mapping and before the
|
||||
regex filter.
|
||||
|
||||
--oidc-organization-assign-default bool, $CODER_OIDC_ORGANIZATION_ASSIGN_DEFAULT (default: true)
|
||||
If set to true, users will always be added to the default
|
||||
organization. If organization sync is enabled, then the default org is
|
||||
always added to the user's set of expectedorganizations.
|
||||
|
||||
--oidc-auth-url-params struct[map[string]string], $CODER_OIDC_AUTH_URL_PARAMS (default: {"access_type": "offline"})
|
||||
OIDC auth URL parameters to pass to the upstream provider.
|
||||
|
||||
@@ -478,6 +484,14 @@ OIDC OPTIONS:
|
||||
--oidc-name-field string, $CODER_OIDC_NAME_FIELD (default: name)
|
||||
OIDC claim field to use as the name.
|
||||
|
||||
--oidc-organization-field string, $CODER_OIDC_ORGANIZATION_FIELD
|
||||
This field must be set if using the organization sync feature. Set to
|
||||
the claim to be used for organizations.
|
||||
|
||||
--oidc-organization-mapping struct[map[string][]uuid.UUID], $CODER_OIDC_ORGANIZATION_MAPPING (default: {})
|
||||
A map of OIDC claims and the organizations in Coder it should map to.
|
||||
This is required because organization IDs must be used within Coder.
|
||||
|
||||
--oidc-group-regex-filter regexp, $CODER_OIDC_GROUP_REGEX_FILTER (default: .*)
|
||||
If provided any group name not matching the regex is ignored. This
|
||||
allows for filtering out groups that are not needed. This filter is
|
||||
|
||||
+4
-5
@@ -6,9 +6,8 @@ USAGE:
|
||||
Run upload and download tests from your machine to a workspace
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: Interval,Throughput)
|
||||
Columns to display in table output. Available columns: Interval,
|
||||
Throughput.
|
||||
-c, --column [Interval|Throughput] (default: Interval,Throughput)
|
||||
Columns to display in table output.
|
||||
|
||||
-d, --direct bool
|
||||
Specifies whether to wait for a direct connection before testing
|
||||
@@ -18,8 +17,8 @@ OPTIONS:
|
||||
Specifies whether to run in reverse mode where the client receives and
|
||||
the server sends.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
--pcap-file string
|
||||
Specifies a file to write a network capture to.
|
||||
|
||||
+4
-5
@@ -11,12 +11,11 @@ SUBCOMMANDS:
|
||||
mem Show memory usage, in gigabytes.
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: host_cpu,host_memory,home_disk,container_cpu,container_memory)
|
||||
Columns to display in table output. Available columns: host cpu, host
|
||||
memory, home disk, container cpu, container memory.
|
||||
-c, --column [host cpu|host memory|home disk|container cpu|container memory] (default: host cpu,host memory,home disk,container cpu,container memory)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+2
-2
@@ -9,8 +9,8 @@ OPTIONS:
|
||||
--host bool
|
||||
Force host CPU measurement.
|
||||
|
||||
-o, --output string (default: text)
|
||||
Output format. Available formats: text, json.
|
||||
-o, --output text|json (default: text)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+2
-2
@@ -6,8 +6,8 @@ USAGE:
|
||||
Show disk usage, in gigabytes.
|
||||
|
||||
OPTIONS:
|
||||
-o, --output string (default: text)
|
||||
Output format. Available formats: text, json.
|
||||
-o, --output text|json (default: text)
|
||||
Output format.
|
||||
|
||||
--path string (default: /)
|
||||
Path for which to check disk usage.
|
||||
|
||||
+2
-2
@@ -9,8 +9,8 @@ OPTIONS:
|
||||
--host bool
|
||||
Force host memory measurement.
|
||||
|
||||
-o, --output string (default: text)
|
||||
Output format. Available formats: text, json.
|
||||
-o, --output text|json (default: text)
|
||||
Output format.
|
||||
|
||||
--prefix Ki|Mi|Gi|Ti (default: Gi)
|
||||
SI Prefix for memory measurement.
|
||||
|
||||
+2
-2
@@ -25,13 +25,13 @@ OPTIONS:
|
||||
--allow-user-cancel-workspace-jobs bool (default: true)
|
||||
Allow users to cancel in-progress workspace jobs.
|
||||
|
||||
--autostart-requirement-weekdays string-array
|
||||
--autostart-requirement-weekdays [monday|tuesday|wednesday|thursday|friday|saturday|sunday|all]
|
||||
Edit the template autostart requirement weekdays - workspaces created
|
||||
from this template can only autostart on the given weekdays. To unset
|
||||
this value for the template (and allow autostart on all days), pass
|
||||
'all'.
|
||||
|
||||
--autostop-requirement-weekdays string-array
|
||||
--autostop-requirement-weekdays [monday|tuesday|wednesday|thursday|friday|saturday|sunday|none]
|
||||
Edit the template autostop requirement weekdays - workspaces created
|
||||
from this template must be restarted on the given weekdays. To unset
|
||||
this value for the template (and disable the autostop requirement for
|
||||
|
||||
+4
-6
@@ -8,13 +8,11 @@ USAGE:
|
||||
Aliases: ls
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: name,organization name,last updated,used by)
|
||||
Columns to display in table output. Available columns: name, created
|
||||
at, last updated, organization id, organization name, provisioner,
|
||||
active version id, used by, default ttl.
|
||||
-c, --column [name|created at|last updated|organization id|organization name|provisioner|active version id|used by|default ttl] (default: name,organization name,last updated,used by)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
@@ -9,15 +9,14 @@ OPTIONS:
|
||||
-O, --org string, $CODER_ORGANIZATION
|
||||
Select which organization (uuid or name) to use.
|
||||
|
||||
-c, --column string-array (default: Name,Created At,Created By,Status,Active)
|
||||
Columns to display in table output. Available columns: name, created
|
||||
at, created by, status, active, archived.
|
||||
-c, --column [name|created at|created by|status|active|archived] (default: name,created at,created by,status,active)
|
||||
Columns to display in table output.
|
||||
|
||||
--include-archived bool
|
||||
Include archived versions in the result list.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+4
-5
@@ -12,12 +12,11 @@ OPTIONS:
|
||||
Specifies whether all users' tokens will be listed or not (must have
|
||||
Owner role to see all tokens).
|
||||
|
||||
-c, --column string-array (default: id,name,last used,expires at,created at)
|
||||
Columns to display in table output. Available columns: id, name, last
|
||||
used, expires at, created at, owner.
|
||||
-c, --column [id|name|last used|expires at|created at|owner] (default: id,name,last used,expires at,created at)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+1
-1
@@ -11,7 +11,7 @@ USAGE:
|
||||
$ coder users activate example_user
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: username,email,created_at,status)
|
||||
-c, --column [username|email|created at|status] (default: username,email,created at,status)
|
||||
Specify a column to filter in the table.
|
||||
|
||||
———
|
||||
|
||||
+4
-5
@@ -6,12 +6,11 @@ USAGE:
|
||||
Aliases: ls
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: username,email,created_at,status)
|
||||
Columns to display in table output. Available columns: id, username,
|
||||
email, created at, updated at, status.
|
||||
-c, --column [id|username|email|created at|updated at|status] (default: username,email,created at,status)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+2
-2
@@ -8,8 +8,8 @@ USAGE:
|
||||
$ coder users show me
|
||||
|
||||
OPTIONS:
|
||||
-o, --output string (default: table)
|
||||
Output format. Available formats: table, json.
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+1
-1
@@ -9,7 +9,7 @@ USAGE:
|
||||
$ coder users suspend example_user
|
||||
|
||||
OPTIONS:
|
||||
-c, --column string-array (default: username,email,created_at,status)
|
||||
-c, --column [username|email|created at|status] (default: username,email,created at,status)
|
||||
Specify a column to filter in the table.
|
||||
|
||||
———
|
||||
|
||||
+2
-2
@@ -6,8 +6,8 @@ USAGE:
|
||||
Show coder version
|
||||
|
||||
OPTIONS:
|
||||
-o, --output string (default: text)
|
||||
Output format. Available formats: text, json.
|
||||
-o, --output text|json (default: text)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
|
||||
+15
-4
@@ -319,6 +319,19 @@ oidc:
|
||||
# Ignore the userinfo endpoint and only use the ID token for user information.
|
||||
# (default: false, type: bool)
|
||||
ignoreUserInfo: false
|
||||
# This field must be set if using the organization sync feature. Set to the claim
|
||||
# to be used for organizations.
|
||||
# (default: <unset>, type: string)
|
||||
organizationField: ""
|
||||
# If set to true, users will always be added to the default organization. If
|
||||
# organization sync is enabled, then the default org is always added to the user's
|
||||
# set of expectedorganizations.
|
||||
# (default: true, type: bool)
|
||||
organizationAssignDefault: true
|
||||
# A map of OIDC claims and the organizations in Coder it should map to. This is
|
||||
# required because organization IDs must be used within Coder.
|
||||
# (default: {}, type: struct[map[string][]uuid.UUID])
|
||||
organizationMapping: {}
|
||||
# This field must be set if using the group sync feature and the scope name is not
|
||||
# 'groups'. Set to the claim to be used for groups.
|
||||
# (default: <unset>, type: string)
|
||||
@@ -414,7 +427,8 @@ updateCheck: false
|
||||
# (default: <unset>, type: bool)
|
||||
enableSwagger: false
|
||||
# The directory to cache temporary files. If unspecified and $CACHE_DIRECTORY is
|
||||
# set, it will be used for compatibility with systemd.
|
||||
# set, it will be used for compatibility with systemd. This directory is NOT safe
|
||||
# to be configured as a shared directory across coderd/provisionerd replicas.
|
||||
# (default: [cache dir], type: string)
|
||||
cacheDir: [cache dir]
|
||||
# Controls whether data will be stored in an in-memory database.
|
||||
@@ -528,9 +542,6 @@ notifications:
|
||||
# Username to use with PLAIN/LOGIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
username: ""
|
||||
# Password to use with PLAIN/LOGIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
password: ""
|
||||
# File from which to load password for use with PLAIN/LOGIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
passwordFile: ""
|
||||
|
||||
+20
-23
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
@@ -26,13 +27,12 @@ func TestUserDelete(t *testing.T) {
|
||||
pw, err := cryptorand.String(16)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = client.CreateUser(ctx, codersdk.CreateUserRequest{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
DisableLogin: false,
|
||||
_, err = client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationIDs: []uuid.UUID{owner.OrganizationID},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -57,13 +57,12 @@ func TestUserDelete(t *testing.T) {
|
||||
pw, err := cryptorand.String(16)
|
||||
require.NoError(t, err)
|
||||
|
||||
user, err := client.CreateUser(ctx, codersdk.CreateUserRequest{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
DisableLogin: false,
|
||||
user, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationIDs: []uuid.UUID{owner.OrganizationID},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -88,13 +87,12 @@ func TestUserDelete(t *testing.T) {
|
||||
pw, err := cryptorand.String(16)
|
||||
require.NoError(t, err)
|
||||
|
||||
user, err := client.CreateUser(ctx, codersdk.CreateUserRequest{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
DisableLogin: false,
|
||||
user, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
Email: "colin5@coder.com",
|
||||
Username: "coolin",
|
||||
Password: pw,
|
||||
UserLoginType: codersdk.LoginTypePassword,
|
||||
OrganizationIDs: []uuid.UUID{owner.OrganizationID},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -121,13 +119,12 @@ func TestUserDelete(t *testing.T) {
|
||||
// pw, err := cryptorand.String(16)
|
||||
// require.NoError(t, err)
|
||||
|
||||
// toDelete, err := client.CreateUser(ctx, codersdk.CreateUserRequest{
|
||||
// toDelete, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
// Email: "colin5@coder.com",
|
||||
// Username: "coolin",
|
||||
// Password: pw,
|
||||
// UserLoginType: codersdk.LoginTypePassword,
|
||||
// OrganizationID: aUser.OrganizationID,
|
||||
// DisableLogin: false,
|
||||
// })
|
||||
// require.NoError(t, err)
|
||||
|
||||
|
||||
+8
-7
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/go-playground/validator/v10"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/pretty"
|
||||
@@ -94,13 +95,13 @@ func (r *RootCmd) userCreate() *serpent.Command {
|
||||
}
|
||||
}
|
||||
|
||||
_, err = client.CreateUser(inv.Context(), codersdk.CreateUserRequest{
|
||||
Email: email,
|
||||
Username: username,
|
||||
Name: name,
|
||||
Password: password,
|
||||
OrganizationID: organization.ID,
|
||||
UserLoginType: userLoginType,
|
||||
_, err = client.CreateUserWithOrgs(inv.Context(), codersdk.CreateUserRequestWithOrgs{
|
||||
Email: email,
|
||||
Username: username,
|
||||
Name: name,
|
||||
Password: password,
|
||||
OrganizationIDs: []uuid.UUID{organization.ID},
|
||||
UserLoginType: userLoginType,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
+1
-1
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
func (r *RootCmd) userList() *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]codersdk.User{}, []string{"username", "email", "created_at", "status"}),
|
||||
cliui.TableFormat([]codersdk.User{}, []string{"username", "email", "created at", "status"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
client := new(codersdk.Client)
|
||||
|
||||
+3
-2
@@ -36,6 +36,7 @@ func (r *RootCmd) createUserStatusCommand(sdkStatus codersdk.UserStatus) *serpen
|
||||
client := new(codersdk.Client)
|
||||
|
||||
var columns []string
|
||||
allColumns := []string{"username", "email", "created at", "status"}
|
||||
cmd := &serpent.Command{
|
||||
Use: fmt.Sprintf("%s <username|user_id>", verb),
|
||||
Short: short,
|
||||
@@ -99,8 +100,8 @@ func (r *RootCmd) createUserStatusCommand(sdkStatus codersdk.UserStatus) *serpen
|
||||
Flag: "column",
|
||||
FlagShorthand: "c",
|
||||
Description: "Specify a column to filter in the table.",
|
||||
Default: strings.Join([]string{"username", "email", "created_at", "status"}, ","),
|
||||
Value: serpent.StringArrayOf(&columns),
|
||||
Default: strings.Join(allColumns, ","),
|
||||
Value: serpent.EnumArrayOf(&columns, allColumns...),
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
||||
Generated
+550
-17
@@ -988,7 +988,7 @@ const docTemplate = `{
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"description": "File to be uploaded",
|
||||
"description": "File to be uploaded. If using tar format, file must conform to ustar (pax may cause problems).",
|
||||
"name": "file",
|
||||
"in": "formData",
|
||||
"required": true
|
||||
@@ -1033,6 +1033,50 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/groups": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Enterprise"
|
||||
],
|
||||
"summary": "Get groups",
|
||||
"operationId": "get-groups",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Organization ID or name",
|
||||
"name": "organization",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID or name",
|
||||
"name": "has_member",
|
||||
"in": "query",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Group"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/groups/{group}": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1547,6 +1591,34 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/notifications/dispatch-methods": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Get notification dispatch methods",
|
||||
"operationId": "get-notification-dispatch-methods",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.NotificationMethodsResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/notifications/settings": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1558,7 +1630,7 @@ const docTemplate = `{
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"General"
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Get notifications settings",
|
||||
"operationId": "get-notifications-settings",
|
||||
@@ -1584,7 +1656,7 @@ const docTemplate = `{
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"General"
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Update notifications settings",
|
||||
"operationId": "update-notifications-settings",
|
||||
@@ -1612,6 +1684,68 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/notifications/templates/system": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Get system notification templates",
|
||||
"operationId": "get-system-notification-templates",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.NotificationTemplate"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/notifications/templates/{notification_template}/method": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Enterprise"
|
||||
],
|
||||
"summary": "Update notification template dispatch method",
|
||||
"operationId": "update-notification-template-dispatch-method",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notification template UUID",
|
||||
"name": "notification_template",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Success"
|
||||
},
|
||||
"304": {
|
||||
"description": "Not modified"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/oauth2-provider/apps": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -2410,12 +2544,15 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"patch": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
@@ -2432,6 +2569,108 @@ const docTemplate = `{
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Upsert role request",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.CustomRoleRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Role"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Members"
|
||||
],
|
||||
"summary": "Insert a custom organization role",
|
||||
"operationId": "insert-a-custom-organization-role",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Organization ID",
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Insert role request",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.CustomRoleRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Role"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/members/roles/{roleName}": {
|
||||
"delete": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Members"
|
||||
],
|
||||
"summary": "Delete a custom organization role",
|
||||
"operationId": "delete-a-custom-organization-role",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Organization ID",
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Role name",
|
||||
"name": "roleName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -2574,6 +2813,48 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/members/{user}/workspace-quota": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Enterprise"
|
||||
],
|
||||
"summary": "Get workspace quota by user",
|
||||
"operationId": "get-workspace-quota-by-user",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Organization ID",
|
||||
"name": "organization",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceQuota"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/organizations/{organization}/members/{user}/workspaces": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -2898,6 +3179,7 @@ const docTemplate = `{
|
||||
],
|
||||
"summary": "Get template examples by organization",
|
||||
"operationId": "get-template-examples-by-organization",
|
||||
"deprecated": true,
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -3319,6 +3601,34 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/templates/examples": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Templates"
|
||||
],
|
||||
"summary": "Get template examples",
|
||||
"operationId": "get-template-examples",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.TemplateExample"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/templates/{template}": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -4565,7 +4875,7 @@ const docTemplate = `{
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.CreateUserRequest"
|
||||
"$ref": "#/definitions/codersdk.CreateUserRequestWithOrgs"
|
||||
}
|
||||
}
|
||||
],
|
||||
@@ -4845,8 +5155,8 @@ const docTemplate = `{
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
"200": {
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5354,6 +5664,90 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/notifications/preferences": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Get user notification preferences",
|
||||
"operationId": "get-user-notification-preferences",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.NotificationPreference"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifications"
|
||||
],
|
||||
"summary": "Update user notification preferences",
|
||||
"operationId": "update-user-notification-preferences",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Preferences",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.UpdateUserNotificationPreferences"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "User ID, name, or me",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.NotificationPreference"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/users/{user}/organizations": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -5906,8 +6300,9 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Enterprise"
|
||||
],
|
||||
"summary": "Get workspace quota by user",
|
||||
"operationId": "get-workspace-quota-by-user",
|
||||
"summary": "Get workspace quota by user deprecated",
|
||||
"operationId": "get-workspace-quota-by-user-deprecated",
|
||||
"deprecated": true,
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -8618,6 +9013,10 @@ const docTemplate = `{
|
||||
"description": "ExternalURL references the current Coder version.\nFor production builds, this will link directly to a release. For development builds, this will link to a commit.",
|
||||
"type": "string"
|
||||
},
|
||||
"provisioner_api_version": {
|
||||
"description": "ProvisionerAPIVersion is the current version of the Provisioner API",
|
||||
"type": "string"
|
||||
},
|
||||
"telemetry": {
|
||||
"description": "Telemetry is a boolean that indicates whether telemetry is enabled.",
|
||||
"type": "boolean"
|
||||
@@ -8868,6 +9267,14 @@ const docTemplate = `{
|
||||
"description": "Icon is a relative path or external URL that specifies\nan icon to be displayed in the dashboard.",
|
||||
"type": "string"
|
||||
},
|
||||
"max_port_share_level": {
|
||||
"description": "MaxPortShareLevel allows optionally specifying the maximum port share level\nfor workspaces created from the template.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.WorkspaceAgentPortShareLevel"
|
||||
}
|
||||
]
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the template.",
|
||||
"type": "string"
|
||||
@@ -9046,17 +9453,13 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CreateUserRequest": {
|
||||
"codersdk.CreateUserRequestWithOrgs": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"email",
|
||||
"username"
|
||||
],
|
||||
"properties": {
|
||||
"disable_login": {
|
||||
"description": "DisableLogin sets the user's login type to 'none'. This prevents the user\nfrom being able to use a password or any other authentication method to login.\nDeprecated: Set UserLoginType=LoginTypeDisabled instead.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email"
|
||||
@@ -9072,9 +9475,13 @@ const docTemplate = `{
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
"organization_ids": {
|
||||
"description": "OrganizationIDs is a list of organization IDs that the user should be a member of.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
}
|
||||
},
|
||||
"password": {
|
||||
"type": "string"
|
||||
@@ -9195,6 +9602,36 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CustomRoleRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_permissions": {
|
||||
"description": "OrganizationPermissions are specific to the organization the role belongs to.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"site_permissions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
},
|
||||
"user_permissions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Permission"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.DAUEntry": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -9906,15 +10343,25 @@ const docTemplate = `{
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"organization_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"quota_allowance": {
|
||||
"type": "integer"
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/codersdk.GroupSource"
|
||||
},
|
||||
"total_member_count": {
|
||||
"description": "How many members are in this group. Shows the total count,\neven if the user is not authorized to read group member details.\nMay be greater than ` + "`" + `len(Group.Members)` + "`" + `.",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -10202,6 +10649,66 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.NotificationMethodsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"available": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.NotificationPreference": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"disabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"updated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.NotificationTemplate": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"actions": {
|
||||
"type": "string"
|
||||
},
|
||||
"body_template": {
|
||||
"type": "string"
|
||||
},
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"method": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"title_template": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.NotificationsConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -10576,6 +11083,15 @@ const docTemplate = `{
|
||||
"name_field": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_assign_default": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"organization_field": {
|
||||
"type": "string"
|
||||
},
|
||||
"organization_mapping": {
|
||||
"type": "object"
|
||||
},
|
||||
"scopes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -11216,7 +11732,10 @@ const docTemplate = `{
|
||||
"deployment_stats",
|
||||
"file",
|
||||
"group",
|
||||
"group_member",
|
||||
"license",
|
||||
"notification_preference",
|
||||
"notification_template",
|
||||
"oauth2_app",
|
||||
"oauth2_app_code_token",
|
||||
"oauth2_app_secret",
|
||||
@@ -11244,7 +11763,10 @@ const docTemplate = `{
|
||||
"ResourceDeploymentStats",
|
||||
"ResourceFile",
|
||||
"ResourceGroup",
|
||||
"ResourceGroupMember",
|
||||
"ResourceLicense",
|
||||
"ResourceNotificationPreference",
|
||||
"ResourceNotificationTemplate",
|
||||
"ResourceOauth2App",
|
||||
"ResourceOauth2AppCodeToken",
|
||||
"ResourceOauth2AppSecret",
|
||||
@@ -12513,6 +13035,17 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.UpdateUserNotificationPreferences": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"template_disabled_map": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.UpdateUserPasswordRequest": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
|
||||
Generated
+14363
-13876
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,10 @@ package appearance
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
@@ -10,36 +13,50 @@ type Fetcher interface {
|
||||
Fetch(ctx context.Context) (codersdk.AppearanceConfig, error)
|
||||
}
|
||||
|
||||
var DefaultSupportLinks = []codersdk.LinkConfig{
|
||||
{
|
||||
Name: "Documentation",
|
||||
Target: "https://coder.com/docs/coder-oss",
|
||||
Icon: "docs",
|
||||
},
|
||||
{
|
||||
Name: "Report a bug",
|
||||
Target: "https://github.com/coder/coder/issues/new?labels=needs+grooming&body={CODER_BUILD_INFO}",
|
||||
Icon: "bug",
|
||||
},
|
||||
{
|
||||
Name: "Join the Coder Discord",
|
||||
Target: "https://coder.com/chat?utm_source=coder&utm_medium=coder&utm_campaign=server-footer",
|
||||
Icon: "chat",
|
||||
},
|
||||
{
|
||||
Name: "Star the Repo",
|
||||
Target: "https://github.com/coder/coder",
|
||||
Icon: "star",
|
||||
},
|
||||
func DefaultSupportLinks(docsURL string) []codersdk.LinkConfig {
|
||||
version := buildinfo.Version()
|
||||
if docsURL == "" {
|
||||
docsURL = "https://coder.com/docs/@" + strings.Split(version, "-")[0]
|
||||
}
|
||||
buildInfo := fmt.Sprintf("Version: [`%s`](%s)", version, buildinfo.ExternalURL())
|
||||
|
||||
return []codersdk.LinkConfig{
|
||||
{
|
||||
Name: "Documentation",
|
||||
Target: docsURL,
|
||||
Icon: "docs",
|
||||
},
|
||||
{
|
||||
Name: "Report a bug",
|
||||
Target: "https://github.com/coder/coder/issues/new?labels=needs+grooming&body=" + buildInfo,
|
||||
Icon: "bug",
|
||||
},
|
||||
{
|
||||
Name: "Join the Coder Discord",
|
||||
Target: "https://coder.com/chat?utm_source=coder&utm_medium=coder&utm_campaign=server-footer",
|
||||
Icon: "chat",
|
||||
},
|
||||
{
|
||||
Name: "Star the Repo",
|
||||
Target: "https://github.com/coder/coder",
|
||||
Icon: "star",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type AGPLFetcher struct{}
|
||||
type AGPLFetcher struct {
|
||||
docsURL string
|
||||
}
|
||||
|
||||
func (AGPLFetcher) Fetch(context.Context) (codersdk.AppearanceConfig, error) {
|
||||
func (f AGPLFetcher) Fetch(context.Context) (codersdk.AppearanceConfig, error) {
|
||||
return codersdk.AppearanceConfig{
|
||||
AnnouncementBanners: []codersdk.BannerConfig{},
|
||||
SupportLinks: DefaultSupportLinks,
|
||||
SupportLinks: DefaultSupportLinks(f.docsURL),
|
||||
}, nil
|
||||
}
|
||||
|
||||
var DefaultFetcher Fetcher = AGPLFetcher{}
|
||||
func NewDefaultFetcher(docsURL string) Fetcher {
|
||||
return &AGPLFetcher{
|
||||
docsURL: docsURL,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,8 @@ type Auditable interface {
|
||||
database.OAuth2ProviderAppSecret |
|
||||
database.CustomRole |
|
||||
database.AuditableOrganizationMember |
|
||||
database.Organization
|
||||
database.Organization |
|
||||
database.NotificationTemplate
|
||||
}
|
||||
|
||||
// Map is a map of changed fields in an audited resource. It maps field names to
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
@@ -117,6 +118,8 @@ func ResourceTarget[T Auditable](tgt T) string {
|
||||
return typed.Username
|
||||
case database.Organization:
|
||||
return typed.Name
|
||||
case database.NotificationTemplate:
|
||||
return typed.Name
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceTarget", tgt))
|
||||
}
|
||||
@@ -163,6 +166,8 @@ func ResourceID[T Auditable](tgt T) uuid.UUID {
|
||||
return typed.UserID
|
||||
case database.Organization:
|
||||
return typed.ID
|
||||
case database.NotificationTemplate:
|
||||
return typed.ID
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceID", tgt))
|
||||
}
|
||||
@@ -206,6 +211,8 @@ func ResourceType[T Auditable](tgt T) database.ResourceType {
|
||||
return database.ResourceTypeOrganizationMember
|
||||
case database.Organization:
|
||||
return database.ResourceTypeOrganization
|
||||
case database.NotificationTemplate:
|
||||
return database.ResourceTypeNotificationTemplate
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceType", typed))
|
||||
}
|
||||
@@ -251,6 +258,8 @@ func ResourceRequiresOrgID[T Auditable]() bool {
|
||||
return true
|
||||
case database.Organization:
|
||||
return true
|
||||
case database.NotificationTemplate:
|
||||
return false
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceRequiresOrgID", tgt))
|
||||
}
|
||||
|
||||
@@ -8,11 +8,13 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
@@ -296,10 +298,11 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
|
||||
if _, err := e.notificationsEnqueuer.Enqueue(e.ctx, ws.OwnerID, notifications.TemplateWorkspaceAutoUpdated,
|
||||
map[string]string{
|
||||
"name": ws.Name,
|
||||
"initiator": "autobuild",
|
||||
"reason": nextBuildReason,
|
||||
"template_version_name": activeTemplateVersion.Name,
|
||||
"name": ws.Name,
|
||||
"initiator": "autobuild",
|
||||
"reason": nextBuildReason,
|
||||
"template_version_name": activeTemplateVersion.Name,
|
||||
"template_version_message": activeTemplateVersion.Message,
|
||||
}, "autobuild",
|
||||
// Associate this notification with all the related entities.
|
||||
ws.ID, ws.OwnerID, ws.TemplateID, ws.OrganizationID,
|
||||
@@ -321,6 +324,7 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
}
|
||||
}
|
||||
if shouldNotifyDormancy {
|
||||
dormantTime := dbtime.Now().Add(time.Duration(tmpl.TimeTilDormant))
|
||||
_, err = e.notificationsEnqueuer.Enqueue(
|
||||
e.ctx,
|
||||
ws.OwnerID,
|
||||
@@ -328,7 +332,7 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
map[string]string{
|
||||
"name": ws.Name,
|
||||
"reason": "inactivity exceeded the dormancy threshold",
|
||||
"timeTilDormant": time.Duration(tmpl.TimeTilDormant).String(),
|
||||
"timeTilDormant": humanize.Time(dormantTime),
|
||||
},
|
||||
"lifecycle_executor",
|
||||
ws.ID,
|
||||
|
||||
+56
-18
@@ -37,6 +37,8 @@ import (
|
||||
"tailscale.com/util/singleflight"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/coderd/entitlements"
|
||||
"github.com/coder/coder/v2/coderd/idpsync"
|
||||
"github.com/coder/quartz"
|
||||
"github.com/coder/serpent"
|
||||
|
||||
@@ -157,6 +159,9 @@ type Options struct {
|
||||
TrialGenerator func(ctx context.Context, body codersdk.LicensorTrialRequest) error
|
||||
// RefreshEntitlements is used to set correct entitlements after creating first user and generating trial license.
|
||||
RefreshEntitlements func(ctx context.Context) error
|
||||
// Entitlements can come from the enterprise caller if enterprise code is
|
||||
// included.
|
||||
Entitlements *entitlements.Set
|
||||
// PostAuthAdditionalHeadersFunc is used to add additional headers to the response
|
||||
// after a successful authentication.
|
||||
// This is somewhat janky, but seemingly the only reasonable way to add a header
|
||||
@@ -182,6 +187,9 @@ type Options struct {
|
||||
// AppSecurityKey is the crypto key used to sign and encrypt tokens related to
|
||||
// workspace applications. It consists of both a signing and encryption key.
|
||||
AppSecurityKey workspaceapps.SecurityKey
|
||||
// CoordinatorResumeTokenProvider is used to provide and validate resume
|
||||
// tokens issued by and passed to the coordinator DRPC API.
|
||||
CoordinatorResumeTokenProvider tailnet.ResumeTokenProvider
|
||||
|
||||
HealthcheckFunc func(ctx context.Context, apiKey string) *healthsdk.HealthcheckReport
|
||||
HealthcheckTimeout time.Duration
|
||||
@@ -236,6 +244,9 @@ type Options struct {
|
||||
WorkspaceUsageTracker *workspacestats.UsageTracker
|
||||
// NotificationsEnqueuer handles enqueueing notifications for delivery by SMTP, webhook, etc.
|
||||
NotificationsEnqueuer notifications.Enqueuer
|
||||
|
||||
// IDPSync holds all configured values for syncing external IDP users into Coder.
|
||||
IDPSync idpsync.IDPSync
|
||||
}
|
||||
|
||||
// @title Coder API
|
||||
@@ -260,6 +271,16 @@ func New(options *Options) *API {
|
||||
if options == nil {
|
||||
options = &Options{}
|
||||
}
|
||||
if options.Entitlements == nil {
|
||||
options.Entitlements = entitlements.New()
|
||||
}
|
||||
if options.IDPSync == nil {
|
||||
options.IDPSync = idpsync.NewAGPLSync(options.Logger, idpsync.SyncSettings{
|
||||
OrganizationField: options.DeploymentValues.OIDC.OrganizationField.Value(),
|
||||
OrganizationMapping: options.DeploymentValues.OIDC.OrganizationMapping.Value,
|
||||
OrganizationAssignDefault: options.DeploymentValues.OIDC.OrganizationAssignDefault.Value(),
|
||||
})
|
||||
}
|
||||
if options.NewTicker == nil {
|
||||
options.NewTicker = func(duration time.Duration) (tick <-chan time.Time, done func()) {
|
||||
ticker := time.NewTicker(duration)
|
||||
@@ -464,7 +485,6 @@ func New(options *Options) *API {
|
||||
TemplateScheduleStore: options.TemplateScheduleStore,
|
||||
UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore,
|
||||
AccessControlStore: options.AccessControlStore,
|
||||
CustomRoleHandler: atomic.Pointer[CustomRoleHandler]{},
|
||||
Experiments: experiments,
|
||||
healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{},
|
||||
Acquirer: provisionerdserver.NewAcquirer(
|
||||
@@ -476,19 +496,19 @@ func New(options *Options) *API {
|
||||
dbRolluper: options.DatabaseRolluper,
|
||||
}
|
||||
|
||||
var customRoleHandler CustomRoleHandler = &agplCustomRoleHandler{}
|
||||
api.CustomRoleHandler.Store(&customRoleHandler)
|
||||
api.AppearanceFetcher.Store(&appearance.DefaultFetcher)
|
||||
f := appearance.NewDefaultFetcher(api.DeploymentValues.DocsURL.String())
|
||||
api.AppearanceFetcher.Store(&f)
|
||||
api.PortSharer.Store(&portsharing.DefaultPortSharer)
|
||||
buildInfo := codersdk.BuildInfoResponse{
|
||||
ExternalURL: buildinfo.ExternalURL(),
|
||||
Version: buildinfo.Version(),
|
||||
AgentAPIVersion: AgentAPIVersionREST,
|
||||
DashboardURL: api.AccessURL.String(),
|
||||
WorkspaceProxy: false,
|
||||
UpgradeMessage: api.DeploymentValues.CLIUpgradeMessage.String(),
|
||||
DeploymentID: api.DeploymentID,
|
||||
Telemetry: api.Telemetry.Enabled(),
|
||||
ExternalURL: buildinfo.ExternalURL(),
|
||||
Version: buildinfo.Version(),
|
||||
AgentAPIVersion: AgentAPIVersionREST,
|
||||
ProvisionerAPIVersion: proto.CurrentVersion.String(),
|
||||
DashboardURL: api.AccessURL.String(),
|
||||
WorkspaceProxy: false,
|
||||
UpgradeMessage: api.DeploymentValues.CLIUpgradeMessage.String(),
|
||||
DeploymentID: api.DeploymentID,
|
||||
Telemetry: api.Telemetry.Enabled(),
|
||||
}
|
||||
api.SiteHandler = site.New(&site.Options{
|
||||
BinFS: binFS,
|
||||
@@ -499,6 +519,7 @@ func New(options *Options) *API {
|
||||
DocsURL: options.DeploymentValues.DocsURL.String(),
|
||||
AppearanceFetcher: &api.AppearanceFetcher,
|
||||
BuildInfo: buildInfo,
|
||||
Entitlements: options.Entitlements,
|
||||
})
|
||||
api.SiteHandler.Experiments.Store(&experiments)
|
||||
|
||||
@@ -586,12 +607,16 @@ func New(options *Options) *API {
|
||||
api.Options.NetworkTelemetryBatchMaxSize,
|
||||
api.handleNetworkTelemetry,
|
||||
)
|
||||
if options.CoordinatorResumeTokenProvider == nil {
|
||||
panic("CoordinatorResumeTokenProvider is nil")
|
||||
}
|
||||
api.TailnetClientService, err = tailnet.NewClientService(tailnet.ClientServiceOptions{
|
||||
Logger: api.Logger.Named("tailnetclient"),
|
||||
CoordPtr: &api.TailnetCoordinator,
|
||||
DERPMapUpdateFrequency: api.Options.DERPMapUpdateFrequency,
|
||||
DERPMapFn: api.DERPMap,
|
||||
NetworkTelemetryHandler: api.NetworkTelemetryBatcher.Handler,
|
||||
ResumeTokenProvider: api.Options.CoordinatorResumeTokenProvider,
|
||||
})
|
||||
if err != nil {
|
||||
api.Logger.Fatal(api.ctx, "failed to initialize tailnet client service", slog.Error(err))
|
||||
@@ -616,6 +641,9 @@ func New(options *Options) *API {
|
||||
options.WorkspaceAppsStatsCollectorOptions.Reporter = api.statsReporter
|
||||
}
|
||||
|
||||
if options.AppSecurityKey.IsZero() {
|
||||
api.Logger.Fatal(api.ctx, "app security key cannot be zero")
|
||||
}
|
||||
api.workspaceAppServer = &workspaceapps.Server{
|
||||
Logger: workspaceAppsLogger,
|
||||
|
||||
@@ -874,7 +902,7 @@ func New(options *Options) *API {
|
||||
r.Route("/templates", func(r chi.Router) {
|
||||
r.Post("/", api.postTemplateByOrganization)
|
||||
r.Get("/", api.templatesByOrganization())
|
||||
r.Get("/examples", api.templateExamples)
|
||||
r.Get("/examples", api.templateExamplesByOrganization)
|
||||
r.Route("/{templatename}", func(r chi.Router) {
|
||||
r.Get("/", api.templateByOrganizationAndName)
|
||||
r.Route("/versions/{templateversionname}", func(r chi.Router) {
|
||||
@@ -887,8 +915,6 @@ func New(options *Options) *API {
|
||||
r.Get("/", api.listMembers)
|
||||
r.Route("/roles", func(r chi.Router) {
|
||||
r.Get("/", api.assignableOrgRoles)
|
||||
r.With(httpmw.RequireExperiment(api.Experiments, codersdk.ExperimentCustomRoles)).
|
||||
Patch("/", api.patchOrgRoles)
|
||||
})
|
||||
|
||||
r.Route("/{user}", func(r chi.Router) {
|
||||
@@ -920,6 +946,7 @@ func New(options *Options) *API {
|
||||
apiKeyMiddleware,
|
||||
)
|
||||
r.Get("/", api.fetchTemplates(nil))
|
||||
r.Get("/examples", api.templateExamples)
|
||||
r.Route("/{template}", func(r chi.Router) {
|
||||
r.Use(
|
||||
httpmw.ExtractTemplateParam(options.Database),
|
||||
@@ -1050,6 +1077,12 @@ func New(options *Options) *API {
|
||||
})
|
||||
r.Get("/gitsshkey", api.gitSSHKey)
|
||||
r.Put("/gitsshkey", api.regenerateGitSSHKey)
|
||||
r.Route("/notifications", func(r chi.Router) {
|
||||
r.Route("/preferences", func(r chi.Router) {
|
||||
r.Get("/", api.userNotificationPreferences)
|
||||
r.Put("/", api.putUserNotificationPreferences)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1243,9 +1276,16 @@ func New(options *Options) *API {
|
||||
})
|
||||
})
|
||||
r.Route("/notifications", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
r.Use(
|
||||
apiKeyMiddleware,
|
||||
httpmw.RequireExperiment(api.Experiments, codersdk.ExperimentNotifications),
|
||||
)
|
||||
r.Get("/settings", api.notificationsSettings)
|
||||
r.Put("/settings", api.putNotificationsSettings)
|
||||
r.Route("/templates", func(r chi.Router) {
|
||||
r.Get("/system", api.systemNotificationTemplates)
|
||||
})
|
||||
r.Get("/dispatch-methods", api.notificationDispatchMethods)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1327,8 +1367,6 @@ type API struct {
|
||||
// passed to dbauthz.
|
||||
AccessControlStore *atomic.Pointer[dbauthz.AccessControlStore]
|
||||
PortSharer atomic.Pointer[portsharing.PortSharer]
|
||||
// CustomRoleHandler is the AGPL/Enterprise implementation for custom roles.
|
||||
CustomRoleHandler atomic.Pointer[CustomRoleHandler]
|
||||
|
||||
HTTPAuth *HTTPAuthorizer
|
||||
|
||||
|
||||
@@ -353,16 +353,28 @@ func (s *PreparedRecorder) CompileToSQL(ctx context.Context, cfg regosql.Convert
|
||||
return s.prepped.CompileToSQL(ctx, cfg)
|
||||
}
|
||||
|
||||
// FakeAuthorizer is an Authorizer that always returns the same error.
|
||||
// FakeAuthorizer is an Authorizer that will return an error based on the
|
||||
// "ConditionalReturn" function. By default, **no error** is returned.
|
||||
// Meaning 'FakeAuthorizer' by default will never return "unauthorized".
|
||||
type FakeAuthorizer struct {
|
||||
// AlwaysReturn is the error that will be returned by Authorize.
|
||||
AlwaysReturn error
|
||||
ConditionalReturn func(context.Context, rbac.Subject, policy.Action, rbac.Object) error
|
||||
}
|
||||
|
||||
var _ rbac.Authorizer = (*FakeAuthorizer)(nil)
|
||||
|
||||
func (d *FakeAuthorizer) Authorize(_ context.Context, _ rbac.Subject, _ policy.Action, _ rbac.Object) error {
|
||||
return d.AlwaysReturn
|
||||
// AlwaysReturn is the error that will be returned by Authorize.
|
||||
func (d *FakeAuthorizer) AlwaysReturn(err error) *FakeAuthorizer {
|
||||
d.ConditionalReturn = func(_ context.Context, _ rbac.Subject, _ policy.Action, _ rbac.Object) error {
|
||||
return err
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *FakeAuthorizer) Authorize(ctx context.Context, subject rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if d.ConditionalReturn != nil {
|
||||
return d.ConditionalReturn(ctx, subject, action, object)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FakeAuthorizer) Prepare(_ context.Context, subject rbac.Subject, action policy.Action, _ string) (rbac.PreparedAuthorized, error) {
|
||||
|
||||
@@ -66,6 +66,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/schedule"
|
||||
"github.com/coder/coder/v2/coderd/telemetry"
|
||||
"github.com/coder/coder/v2/coderd/unhanger"
|
||||
@@ -96,25 +97,26 @@ type Options struct {
|
||||
// AccessURL denotes a custom access URL. By default we use the httptest
|
||||
// server's URL. Setting this may result in unexpected behavior (especially
|
||||
// with running agents).
|
||||
AccessURL *url.URL
|
||||
AppHostname string
|
||||
AWSCertificates awsidentity.Certificates
|
||||
Authorizer rbac.Authorizer
|
||||
AzureCertificates x509.VerifyOptions
|
||||
GithubOAuth2Config *coderd.GithubOAuth2Config
|
||||
RealIPConfig *httpmw.RealIPConfig
|
||||
OIDCConfig *coderd.OIDCConfig
|
||||
GoogleTokenValidator *idtoken.Validator
|
||||
SSHKeygenAlgorithm gitsshkey.Algorithm
|
||||
AutobuildTicker <-chan time.Time
|
||||
AutobuildStats chan<- autobuild.Stats
|
||||
Auditor audit.Auditor
|
||||
TLSCertificates []tls.Certificate
|
||||
ExternalAuthConfigs []*externalauth.Config
|
||||
TrialGenerator func(ctx context.Context, body codersdk.LicensorTrialRequest) error
|
||||
RefreshEntitlements func(ctx context.Context) error
|
||||
TemplateScheduleStore schedule.TemplateScheduleStore
|
||||
Coordinator tailnet.Coordinator
|
||||
AccessURL *url.URL
|
||||
AppHostname string
|
||||
AWSCertificates awsidentity.Certificates
|
||||
Authorizer rbac.Authorizer
|
||||
AzureCertificates x509.VerifyOptions
|
||||
GithubOAuth2Config *coderd.GithubOAuth2Config
|
||||
RealIPConfig *httpmw.RealIPConfig
|
||||
OIDCConfig *coderd.OIDCConfig
|
||||
GoogleTokenValidator *idtoken.Validator
|
||||
SSHKeygenAlgorithm gitsshkey.Algorithm
|
||||
AutobuildTicker <-chan time.Time
|
||||
AutobuildStats chan<- autobuild.Stats
|
||||
Auditor audit.Auditor
|
||||
TLSCertificates []tls.Certificate
|
||||
ExternalAuthConfigs []*externalauth.Config
|
||||
TrialGenerator func(ctx context.Context, body codersdk.LicensorTrialRequest) error
|
||||
RefreshEntitlements func(ctx context.Context) error
|
||||
TemplateScheduleStore schedule.TemplateScheduleStore
|
||||
Coordinator tailnet.Coordinator
|
||||
CoordinatorResumeTokenProvider tailnet.ResumeTokenProvider
|
||||
|
||||
HealthcheckFunc func(ctx context.Context, apiKey string) *healthsdk.HealthcheckReport
|
||||
HealthcheckTimeout time.Duration
|
||||
@@ -204,7 +206,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
options = &Options{}
|
||||
}
|
||||
if options.Logger == nil {
|
||||
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug).Named("coderd")
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug).Named("coderd")
|
||||
options.Logger = &logger
|
||||
}
|
||||
if options.GoogleTokenValidator == nil {
|
||||
@@ -240,6 +242,9 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
if options.Database == nil {
|
||||
options.Database, options.Pubsub = dbtestutil.NewDB(t)
|
||||
}
|
||||
if options.CoordinatorResumeTokenProvider == nil {
|
||||
options.CoordinatorResumeTokenProvider = tailnet.NewInsecureTestResumeTokenProvider()
|
||||
}
|
||||
|
||||
if options.NotificationsEnqueuer == nil {
|
||||
options.NotificationsEnqueuer = new(testutil.FakeNotificationsEnqueuer)
|
||||
@@ -264,8 +269,19 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
if options.DeploymentValues == nil {
|
||||
options.DeploymentValues = DeploymentValues(t)
|
||||
}
|
||||
// This value is not safe to run in parallel. Force it to be false.
|
||||
options.DeploymentValues.DisableOwnerWorkspaceExec = false
|
||||
// DisableOwnerWorkspaceExec modifies the 'global' RBAC roles. Fast-fail tests if we detect this.
|
||||
if !options.DeploymentValues.DisableOwnerWorkspaceExec.Value() {
|
||||
ownerSubj := rbac.Subject{
|
||||
Roles: rbac.RoleIdentifiers{rbac.RoleOwner()},
|
||||
Scope: rbac.ScopeAll,
|
||||
}
|
||||
if err := options.Authorizer.Authorize(context.Background(), ownerSubj, policy.ActionSSH, rbac.ResourceWorkspace); err != nil {
|
||||
if rbac.IsUnauthorizedError(err) {
|
||||
t.Fatal("Side-effect of DisableOwnerWorkspaceExec detected in unrelated test. Please move the test that requires DisableOwnerWorkspaceExec to its own package so that it does not impact other tests!")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// If no ratelimits are set, disable all rate limiting for tests.
|
||||
if options.APIRateLimit == 0 {
|
||||
@@ -492,6 +508,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
TailnetCoordinator: options.Coordinator,
|
||||
BaseDERPMap: derpMap,
|
||||
DERPMapUpdateFrequency: 150 * time.Millisecond,
|
||||
CoordinatorResumeTokenProvider: options.CoordinatorResumeTokenProvider,
|
||||
MetricsCacheRefreshInterval: options.MetricsCacheRefreshInterval,
|
||||
AgentStatsRefreshInterval: options.AgentStatsRefreshInterval,
|
||||
DeploymentValues: options.DeploymentValues,
|
||||
@@ -641,11 +658,11 @@ func CreateFirstUser(t testing.TB, client *codersdk.Client) codersdk.CreateFirst
|
||||
// CreateAnotherUser creates and authenticates a new user.
|
||||
// Roles can include org scoped roles with 'roleName:<organization_id>'
|
||||
func CreateAnotherUser(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles ...rbac.RoleIdentifier) (*codersdk.Client, codersdk.User) {
|
||||
return createAnotherUserRetry(t, client, organizationID, 5, roles)
|
||||
return createAnotherUserRetry(t, client, []uuid.UUID{organizationID}, 5, roles)
|
||||
}
|
||||
|
||||
func CreateAnotherUserMutators(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) {
|
||||
return createAnotherUserRetry(t, client, organizationID, 5, roles, mutators...)
|
||||
func CreateAnotherUserMutators(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequestWithOrgs)) (*codersdk.Client, codersdk.User) {
|
||||
return createAnotherUserRetry(t, client, []uuid.UUID{organizationID}, 5, roles, mutators...)
|
||||
}
|
||||
|
||||
// AuthzUserSubject does not include the user's groups.
|
||||
@@ -671,31 +688,31 @@ func AuthzUserSubject(user codersdk.User, orgID uuid.UUID) rbac.Subject {
|
||||
}
|
||||
}
|
||||
|
||||
func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, retries int, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) {
|
||||
req := codersdk.CreateUserRequest{
|
||||
Email: namesgenerator.GetRandomName(10) + "@coder.com",
|
||||
Username: RandomUsername(t),
|
||||
Name: RandomName(t),
|
||||
Password: "SomeSecurePassword!",
|
||||
OrganizationID: organizationID,
|
||||
func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationIDs []uuid.UUID, retries int, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequestWithOrgs)) (*codersdk.Client, codersdk.User) {
|
||||
req := codersdk.CreateUserRequestWithOrgs{
|
||||
Email: namesgenerator.GetRandomName(10) + "@coder.com",
|
||||
Username: RandomUsername(t),
|
||||
Name: RandomName(t),
|
||||
Password: "SomeSecurePassword!",
|
||||
OrganizationIDs: organizationIDs,
|
||||
}
|
||||
for _, m := range mutators {
|
||||
m(&req)
|
||||
}
|
||||
|
||||
user, err := client.CreateUser(context.Background(), req)
|
||||
user, err := client.CreateUserWithOrgs(context.Background(), req)
|
||||
var apiError *codersdk.Error
|
||||
// If the user already exists by username or email conflict, try again up to "retries" times.
|
||||
if err != nil && retries >= 0 && xerrors.As(err, &apiError) {
|
||||
if apiError.StatusCode() == http.StatusConflict {
|
||||
retries--
|
||||
return createAnotherUserRetry(t, client, organizationID, retries, roles)
|
||||
return createAnotherUserRetry(t, client, organizationIDs, retries, roles)
|
||||
}
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
var sessionToken string
|
||||
if req.DisableLogin || req.UserLoginType == codersdk.LoginTypeNone {
|
||||
if req.UserLoginType == codersdk.LoginTypeNone {
|
||||
// Cannot log in with a disabled login user. So make it an api key from
|
||||
// the client making this user.
|
||||
token, err := client.CreateToken(context.Background(), user.ID.String(), codersdk.CreateTokenRequest{
|
||||
@@ -758,8 +775,9 @@ func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationI
|
||||
require.NoError(t, err, "update site roles")
|
||||
|
||||
// isMember keeps track of which orgs the user was added to as a member
|
||||
isMember := map[uuid.UUID]bool{
|
||||
organizationID: true,
|
||||
isMember := make(map[uuid.UUID]bool)
|
||||
for _, orgID := range organizationIDs {
|
||||
isMember[orgID] = true
|
||||
}
|
||||
|
||||
// Update org roles
|
||||
@@ -1380,10 +1398,13 @@ func SDKError(t testing.TB, err error) *codersdk.Error {
|
||||
return cerr
|
||||
}
|
||||
|
||||
func DeploymentValues(t testing.TB) *codersdk.DeploymentValues {
|
||||
var cfg codersdk.DeploymentValues
|
||||
func DeploymentValues(t testing.TB, mut ...func(*codersdk.DeploymentValues)) *codersdk.DeploymentValues {
|
||||
cfg := &codersdk.DeploymentValues{}
|
||||
opts := cfg.Options()
|
||||
err := opts.SetDefaults()
|
||||
require.NoError(t, err)
|
||||
return &cfg
|
||||
for _, fn := range mut {
|
||||
fn(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -10,7 +10,10 @@ import (
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/rds/auth"
|
||||
"github.com/lib/pq"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
)
|
||||
|
||||
type awsIamRdsDriver struct {
|
||||
@@ -18,7 +21,10 @@ type awsIamRdsDriver struct {
|
||||
cfg aws.Config
|
||||
}
|
||||
|
||||
var _ driver.Driver = &awsIamRdsDriver{}
|
||||
var (
|
||||
_ driver.Driver = &awsIamRdsDriver{}
|
||||
_ database.ConnectorCreator = &awsIamRdsDriver{}
|
||||
)
|
||||
|
||||
// Register initializes and registers our aws iam rds wrapped database driver.
|
||||
func Register(ctx context.Context, parentName string) (string, error) {
|
||||
@@ -65,6 +71,16 @@ func (d *awsIamRdsDriver) Open(name string) (driver.Conn, error) {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Connector returns a driver.Connector that fetches a new authentication token for each connection.
|
||||
func (d *awsIamRdsDriver) Connector(name string) (driver.Connector, error) {
|
||||
connector := &connector{
|
||||
url: name,
|
||||
cfg: d.cfg,
|
||||
}
|
||||
|
||||
return connector, nil
|
||||
}
|
||||
|
||||
func getAuthenticatedURL(cfg aws.Config, dbURL string) (string, error) {
|
||||
nURL, err := url.Parse(dbURL)
|
||||
if err != nil {
|
||||
@@ -82,3 +98,37 @@ func getAuthenticatedURL(cfg aws.Config, dbURL string) (string, error) {
|
||||
|
||||
return nURL.String(), nil
|
||||
}
|
||||
|
||||
type connector struct {
|
||||
url string
|
||||
cfg aws.Config
|
||||
dialer pq.Dialer
|
||||
}
|
||||
|
||||
var _ database.DialerConnector = &connector{}
|
||||
|
||||
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
nURL, err := getAuthenticatedURL(c.cfg, c.url)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("assigning authentication token to url: %w", err)
|
||||
}
|
||||
|
||||
nc, err := pq.NewConnector(nURL)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("creating new connector: %w", err)
|
||||
}
|
||||
|
||||
if c.dialer != nil {
|
||||
nc.Dialer(c.dialer)
|
||||
}
|
||||
|
||||
return nc.Connect(ctx)
|
||||
}
|
||||
|
||||
func (*connector) Driver() driver.Driver {
|
||||
return &pq.Driver{}
|
||||
}
|
||||
|
||||
func (c *connector) Dialer(dialer pq.Dialer) {
|
||||
c.dialer = dialer
|
||||
}
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/v2/cli"
|
||||
awsrdsiam "github.com/coder/coder/v2/coderd/database/awsiamrds"
|
||||
"github.com/coder/coder/v2/coderd/database/awsiamrds"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
@@ -22,13 +23,15 @@ func TestDriver(t *testing.T) {
|
||||
// export DBAWSIAMRDS_TEST_URL="postgres://user@host:5432/dbname";
|
||||
url := os.Getenv("DBAWSIAMRDS_TEST_URL")
|
||||
if url == "" {
|
||||
t.Log("skipping test; no DBAWSIAMRDS_TEST_URL set")
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
sqlDriver, err := awsrdsiam.Register(ctx, "postgres")
|
||||
sqlDriver, err := awsiamrds.Register(ctx, "postgres")
|
||||
require.NoError(t, err)
|
||||
|
||||
db, err := cli.ConnectToPostgres(ctx, slogtest.Make(t, nil), sqlDriver, url)
|
||||
@@ -47,4 +50,23 @@ func TestDriver(t *testing.T) {
|
||||
var one int
|
||||
require.NoError(t, i.Scan(&one))
|
||||
require.Equal(t, 1, one)
|
||||
|
||||
ps, err := pubsub.New(ctx, logger, db, url)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotChan := make(chan struct{})
|
||||
subCancel, err := ps.Subscribe("test", func(_ context.Context, _ []byte) {
|
||||
close(gotChan)
|
||||
})
|
||||
defer subCancel()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ps.Publish("test", []byte("hello"))
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case <-gotChan:
|
||||
case <-ctx.Done():
|
||||
require.Fail(t, "timed out waiting for message")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// ConnectorCreator is a driver.Driver that can create a driver.Connector.
|
||||
type ConnectorCreator interface {
|
||||
driver.Driver
|
||||
Connector(name string) (driver.Connector, error)
|
||||
}
|
||||
|
||||
// DialerConnector is a driver.Connector that can set a pq.Dialer.
|
||||
type DialerConnector interface {
|
||||
driver.Connector
|
||||
Dialer(dialer pq.Dialer)
|
||||
}
|
||||
@@ -159,6 +159,35 @@ func ReducedUser(user database.User) codersdk.ReducedUser {
|
||||
}
|
||||
}
|
||||
|
||||
func UserFromGroupMember(member database.GroupMember) database.User {
|
||||
return database.User{
|
||||
ID: member.UserID,
|
||||
Email: member.UserEmail,
|
||||
Username: member.UserUsername,
|
||||
HashedPassword: member.UserHashedPassword,
|
||||
CreatedAt: member.UserCreatedAt,
|
||||
UpdatedAt: member.UserUpdatedAt,
|
||||
Status: member.UserStatus,
|
||||
RBACRoles: member.UserRbacRoles,
|
||||
LoginType: member.UserLoginType,
|
||||
AvatarURL: member.UserAvatarUrl,
|
||||
Deleted: member.UserDeleted,
|
||||
LastSeenAt: member.UserLastSeenAt,
|
||||
QuietHoursSchedule: member.UserQuietHoursSchedule,
|
||||
ThemePreference: member.UserThemePreference,
|
||||
Name: member.UserName,
|
||||
GithubComUserID: member.UserGithubComUserID,
|
||||
}
|
||||
}
|
||||
|
||||
func ReducedUserFromGroupMember(member database.GroupMember) codersdk.ReducedUser {
|
||||
return ReducedUser(UserFromGroupMember(member))
|
||||
}
|
||||
|
||||
func ReducedUsersFromGroupMembers(members []database.GroupMember) []codersdk.ReducedUser {
|
||||
return List(members, ReducedUserFromGroupMember)
|
||||
}
|
||||
|
||||
func ReducedUsers(users []database.User) []codersdk.ReducedUser {
|
||||
return List(users, ReducedUser)
|
||||
}
|
||||
@@ -179,16 +208,19 @@ func Users(users []database.User, organizationIDs map[uuid.UUID][]uuid.UUID) []c
|
||||
})
|
||||
}
|
||||
|
||||
func Group(group database.Group, members []database.User) codersdk.Group {
|
||||
func Group(row database.GetGroupsRow, members []database.GroupMember, totalMemberCount int) codersdk.Group {
|
||||
return codersdk.Group{
|
||||
ID: group.ID,
|
||||
Name: group.Name,
|
||||
DisplayName: group.DisplayName,
|
||||
OrganizationID: group.OrganizationID,
|
||||
AvatarURL: group.AvatarURL,
|
||||
Members: ReducedUsers(members),
|
||||
QuotaAllowance: int(group.QuotaAllowance),
|
||||
Source: codersdk.GroupSource(group.Source),
|
||||
ID: row.Group.ID,
|
||||
Name: row.Group.Name,
|
||||
DisplayName: row.Group.DisplayName,
|
||||
OrganizationID: row.Group.OrganizationID,
|
||||
AvatarURL: row.Group.AvatarURL,
|
||||
Members: ReducedUsersFromGroupMembers(members),
|
||||
TotalMemberCount: totalMemberCount,
|
||||
QuotaAllowance: int(row.Group.QuotaAllowance),
|
||||
Source: codersdk.GroupSource(row.Group.Source),
|
||||
OrganizationName: row.OrganizationName,
|
||||
OrganizationDisplayName: row.OrganizationDisplayName,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,8 +19,8 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// TestUpsertCustomRoles verifies creating custom roles cannot escalate permissions.
|
||||
func TestUpsertCustomRoles(t *testing.T) {
|
||||
// TestInsertCustomRoles verifies creating custom roles cannot escalate permissions.
|
||||
func TestInsertCustomRoles(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
userID := uuid.New()
|
||||
@@ -98,7 +98,7 @@ func TestUpsertCustomRoles(t *testing.T) {
|
||||
org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceWorkspace: {codersdk.ActionRead},
|
||||
}),
|
||||
errorContains: "cannot assign both org and site permissions",
|
||||
errorContains: "organization roles specify site or user permissions",
|
||||
},
|
||||
{
|
||||
name: "invalid-action",
|
||||
@@ -231,7 +231,7 @@ func TestUpsertCustomRoles(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
ctx = dbauthz.As(ctx, subject)
|
||||
|
||||
_, err := az.UpsertCustomRole(ctx, database.UpsertCustomRoleParams{
|
||||
_, err := az.InsertCustomRole(ctx, database.InsertCustomRoleParams{
|
||||
Name: "test-role",
|
||||
DisplayName: "",
|
||||
OrganizationID: tc.organizationID,
|
||||
|
||||
+273
-123
@@ -236,20 +236,23 @@ var (
|
||||
Identifier: rbac.RoleIdentifier{Name: "system"},
|
||||
DisplayName: "Coder",
|
||||
Site: rbac.Permissions(map[string][]policy.Action{
|
||||
rbac.ResourceWildcard.Type: {policy.ActionRead},
|
||||
rbac.ResourceApiKey.Type: rbac.ResourceApiKey.AvailableActions(),
|
||||
rbac.ResourceGroup.Type: {policy.ActionCreate, policy.ActionUpdate},
|
||||
rbac.ResourceAssignRole.Type: rbac.ResourceAssignRole.AvailableActions(),
|
||||
rbac.ResourceAssignOrgRole.Type: rbac.ResourceAssignOrgRole.AvailableActions(),
|
||||
rbac.ResourceSystem.Type: {policy.WildcardSymbol},
|
||||
rbac.ResourceOrganization.Type: {policy.ActionCreate, policy.ActionRead},
|
||||
rbac.ResourceOrganizationMember.Type: {policy.ActionCreate},
|
||||
rbac.ResourceProvisionerDaemon.Type: {policy.ActionCreate, policy.ActionUpdate},
|
||||
rbac.ResourceProvisionerKeys.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionDelete},
|
||||
rbac.ResourceUser.Type: rbac.ResourceUser.AvailableActions(),
|
||||
rbac.ResourceWorkspaceDormant.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStop},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionSSH},
|
||||
rbac.ResourceWorkspaceProxy.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceWildcard.Type: {policy.ActionRead},
|
||||
rbac.ResourceApiKey.Type: rbac.ResourceApiKey.AvailableActions(),
|
||||
rbac.ResourceGroup.Type: {policy.ActionCreate, policy.ActionUpdate},
|
||||
rbac.ResourceAssignRole.Type: rbac.ResourceAssignRole.AvailableActions(),
|
||||
rbac.ResourceAssignOrgRole.Type: rbac.ResourceAssignOrgRole.AvailableActions(),
|
||||
rbac.ResourceSystem.Type: {policy.WildcardSymbol},
|
||||
rbac.ResourceOrganization.Type: {policy.ActionCreate, policy.ActionRead},
|
||||
rbac.ResourceOrganizationMember.Type: {policy.ActionCreate, policy.ActionDelete, policy.ActionRead},
|
||||
rbac.ResourceProvisionerDaemon.Type: {policy.ActionCreate, policy.ActionUpdate},
|
||||
rbac.ResourceProvisionerKeys.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionDelete},
|
||||
rbac.ResourceUser.Type: rbac.ResourceUser.AvailableActions(),
|
||||
rbac.ResourceWorkspaceDormant.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStop},
|
||||
rbac.ResourceWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionSSH},
|
||||
rbac.ResourceWorkspaceProxy.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceDeploymentConfig.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceNotificationPreference.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
rbac.ResourceNotificationTemplate.Type: {policy.ActionCreate, policy.ActionUpdate, policy.ActionDelete},
|
||||
}),
|
||||
Org: map[string][]rbac.Permission{},
|
||||
User: []rbac.Permission{},
|
||||
@@ -815,6 +818,86 @@ func (q *querier) customRoleEscalationCheck(ctx context.Context, actor rbac.Subj
|
||||
return nil
|
||||
}
|
||||
|
||||
// customRoleCheck will validate a custom role for inserting or updating.
|
||||
// If the role is not valid, an error will be returned.
|
||||
// - Check custom roles are valid for their resource types + actions
|
||||
// - Check the actor can create the custom role
|
||||
// - Check the custom role does not grant perms the actor does not have
|
||||
// - Prevent negative perms
|
||||
// - Prevent roles with site and org permissions.
|
||||
func (q *querier) customRoleCheck(ctx context.Context, role database.CustomRole) error {
|
||||
act, ok := ActorFromContext(ctx)
|
||||
if !ok {
|
||||
return NoActorError
|
||||
}
|
||||
|
||||
// Org permissions require an org role
|
||||
if role.OrganizationID.UUID == uuid.Nil && len(role.OrgPermissions) > 0 {
|
||||
return xerrors.Errorf("organization permissions require specifying an organization id")
|
||||
}
|
||||
|
||||
// Org roles can only specify org permissions
|
||||
if role.OrganizationID.UUID != uuid.Nil && (len(role.SitePermissions) > 0 || len(role.UserPermissions) > 0) {
|
||||
return xerrors.Errorf("organization roles specify site or user permissions")
|
||||
}
|
||||
|
||||
// The rbac.Role has a 'Valid()' function on it that will do a lot
|
||||
// of checks.
|
||||
rbacRole, err := rolestore.ConvertDBRole(database.CustomRole{
|
||||
Name: role.Name,
|
||||
DisplayName: role.DisplayName,
|
||||
SitePermissions: role.SitePermissions,
|
||||
OrgPermissions: role.OrgPermissions,
|
||||
UserPermissions: role.UserPermissions,
|
||||
OrganizationID: role.OrganizationID,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("invalid args: %w", err)
|
||||
}
|
||||
|
||||
err = rbacRole.Valid()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("invalid role: %w", err)
|
||||
}
|
||||
|
||||
if len(rbacRole.Org) > 0 && len(rbacRole.Site) > 0 {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org scoped perms, then knowing who can
|
||||
// do what gets more complicated.
|
||||
return xerrors.Errorf("invalid custom role, cannot assign both org and site permissions at the same time")
|
||||
}
|
||||
|
||||
if len(rbacRole.Org) > 1 {
|
||||
// Again to avoid more complexity in our roles
|
||||
return xerrors.Errorf("invalid custom role, cannot assign permissions to more than 1 org at a time")
|
||||
}
|
||||
|
||||
// Prevent escalation
|
||||
for _, sitePerm := range rbacRole.Site {
|
||||
err := q.customRoleEscalationCheck(ctx, act, sitePerm, rbac.Object{Type: sitePerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("site permission: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for orgID, perms := range rbacRole.Org {
|
||||
for _, orgPerm := range perms {
|
||||
err := q.customRoleEscalationCheck(ctx, act, orgPerm, rbac.Object{OrgID: orgID, Type: orgPerm.ResourceType})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("org=%q: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, userPerm := range rbacRole.User {
|
||||
err := q.customRoleEscalationCheck(ctx, act, userPerm, rbac.Object{Type: userPerm.ResourceType, Owner: act.ID})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("user permission: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *querier) AcquireLock(ctx context.Context, id int64) error {
|
||||
return q.db.AcquireLock(ctx, id)
|
||||
}
|
||||
@@ -958,6 +1041,20 @@ func (q *querier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
return q.db.DeleteCoordinator(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteCustomRole(ctx context.Context, arg database.DeleteCustomRoleParams) error {
|
||||
if arg.OrganizationID.UUID != uuid.Nil {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAssignRole); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return q.db.DeleteCustomRole(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteExternalAuthLink(ctx context.Context, arg database.DeleteExternalAuthLinkParams) error {
|
||||
return fetchAndExec(q.log, q.auth, policy.ActionUpdatePersonal, func(ctx context.Context, arg database.DeleteExternalAuthLinkParams) (database.ExternalAuthLink, error) {
|
||||
//nolint:gosimple
|
||||
@@ -1047,11 +1144,11 @@ func (q *querier) DeleteOldProvisionerDaemons(ctx context.Context) error {
|
||||
return q.db.DeleteOldProvisionerDaemons(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context) error {
|
||||
func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteOldWorkspaceAgentLogs(ctx)
|
||||
return q.db.DeleteOldWorkspaceAgentLogs(ctx, threshold)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteOldWorkspaceAgentStats(ctx context.Context) error {
|
||||
@@ -1238,7 +1335,9 @@ func (q *querier) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
}
|
||||
|
||||
func (q *querier) GetAppSecurityKey(ctx context.Context) (string, error) {
|
||||
// No authz checks
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return q.db.GetAppSecurityKey(ctx)
|
||||
}
|
||||
|
||||
@@ -1248,6 +1347,13 @@ func (q *querier) GetApplicationName(ctx context.Context) (string, error) {
|
||||
}
|
||||
|
||||
func (q *querier) GetAuditLogsOffset(ctx context.Context, arg database.GetAuditLogsOffsetParams) ([]database.GetAuditLogsOffsetRow, error) {
|
||||
// Shortcut if the user is an owner. The SQL filter is noticeable,
|
||||
// and this is an easy win for owners. Which is the common case.
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAuditLog)
|
||||
if err == nil {
|
||||
return q.db.GetAuditLogsOffset(ctx, arg)
|
||||
}
|
||||
|
||||
prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceAuditLog.Type)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("(dev error) prepare sql filter: %w", err)
|
||||
@@ -1263,6 +1369,13 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
|
||||
return q.db.GetAuthorizationUserRoles(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return q.db.GetCoordinatorResumeTokenSigningKey(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetDBCryptKeys(ctx context.Context) ([]database.DBCryptKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
@@ -1375,26 +1488,31 @@ func (q *querier) GetGroupMembers(ctx context.Context) ([]database.GroupMember,
|
||||
return q.db.GetGroupMembers(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetGroupMembersByGroupID(ctx context.Context, id uuid.UUID) ([]database.User, error) {
|
||||
if _, err := q.GetGroupByID(ctx, id); err != nil { // AuthZ check
|
||||
return nil, err
|
||||
func (q *querier) GetGroupMembersByGroupID(ctx context.Context, id uuid.UUID) ([]database.GroupMember, error) {
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetGroupMembersByGroupID)(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetGroupMembersCountByGroupID(ctx context.Context, groupID uuid.UUID) (int64, error) {
|
||||
if _, err := q.GetGroupByID(ctx, groupID); err != nil { // AuthZ check
|
||||
return 0, err
|
||||
}
|
||||
return q.db.GetGroupMembersByGroupID(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetGroups(ctx context.Context) ([]database.Group, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return nil, err
|
||||
memberCount, err := q.db.GetGroupMembersCountByGroupID(ctx, groupID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return q.db.GetGroups(ctx)
|
||||
return memberCount, nil
|
||||
}
|
||||
|
||||
func (q *querier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) {
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetGroupsByOrganizationAndUserID)(ctx, arg)
|
||||
}
|
||||
func (q *querier) GetGroups(ctx context.Context, arg database.GetGroupsParams) ([]database.GetGroupsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err == nil {
|
||||
// Optimize this query for system users as it is used in telemetry.
|
||||
// Calling authz on all groups in a deployment for telemetry jobs is
|
||||
// excessive. Most user calls should have some filtering applied to reduce
|
||||
// the size of the set.
|
||||
return q.db.GetGroups(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]database.Group, error) {
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetGroupsByOrganizationID)(ctx, organizationID)
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetGroups)(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetHealthSettings(ctx context.Context) (string, error) {
|
||||
@@ -1474,6 +1592,23 @@ func (q *querier) GetNotificationMessagesByStatus(ctx context.Context, arg datab
|
||||
return q.db.GetNotificationMessagesByStatus(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetNotificationTemplateByID(ctx context.Context, id uuid.UUID) (database.NotificationTemplate, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceNotificationTemplate); err != nil {
|
||||
return database.NotificationTemplate{}, err
|
||||
}
|
||||
return q.db.GetNotificationTemplateByID(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetNotificationTemplatesByKind(ctx context.Context, kind database.NotificationTemplateKind) ([]database.NotificationTemplate, error) {
|
||||
// Anyone can read the system notification templates.
|
||||
if kind == database.NotificationTemplateKindSystem {
|
||||
return q.db.GetNotificationTemplatesByKind(ctx, kind)
|
||||
}
|
||||
|
||||
// TODO(dannyk): handle template ownership when we support user-default notification templates.
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *querier) GetNotificationsSettings(ctx context.Context) (string, error) {
|
||||
// No authz checks
|
||||
return q.db.GetNotificationsSettings(ctx)
|
||||
@@ -1565,9 +1700,9 @@ func (q *querier) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uuid.
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationIDsByMemberIDs)(ctx, ids)
|
||||
}
|
||||
|
||||
func (q *querier) GetOrganizations(ctx context.Context) ([]database.Organization, error) {
|
||||
func (q *querier) GetOrganizations(ctx context.Context, args database.GetOrganizationsParams) ([]database.Organization, error) {
|
||||
fetch := func(ctx context.Context, _ interface{}) ([]database.Organization, error) {
|
||||
return q.db.GetOrganizations(ctx)
|
||||
return q.db.GetOrganizations(ctx, args)
|
||||
}
|
||||
return fetchWithPostFilter(q.auth, policy.ActionRead, fetch)(ctx, nil)
|
||||
}
|
||||
@@ -1691,20 +1826,20 @@ func (q *querier) GetProvisionerLogsAfterID(ctx context.Context, arg database.Ge
|
||||
return q.db.GetProvisionerLogsAfterID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetQuotaAllowanceForUser(ctx context.Context, userID uuid.UUID) (int64, error) {
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceUserObject(userID))
|
||||
func (q *querier) GetQuotaAllowanceForUser(ctx context.Context, params database.GetQuotaAllowanceForUserParams) (int64, error) {
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceUserObject(params.UserID))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return q.db.GetQuotaAllowanceForUser(ctx, userID)
|
||||
return q.db.GetQuotaAllowanceForUser(ctx, params)
|
||||
}
|
||||
|
||||
func (q *querier) GetQuotaConsumedForUser(ctx context.Context, userID uuid.UUID) (int64, error) {
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceUserObject(userID))
|
||||
func (q *querier) GetQuotaConsumedForUser(ctx context.Context, params database.GetQuotaConsumedForUserParams) (int64, error) {
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceUserObject(params.OwnerID))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return q.db.GetQuotaConsumedForUser(ctx, userID)
|
||||
return q.db.GetQuotaConsumedForUser(ctx, params)
|
||||
}
|
||||
|
||||
func (q *querier) GetReplicaByID(ctx context.Context, id uuid.UUID) (database.Replica, error) {
|
||||
@@ -2085,6 +2220,13 @@ func (q *querier) GetUserLinksByUserID(ctx context.Context, userID uuid.UUID) ([
|
||||
return q.db.GetUserLinksByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetUserNotificationPreferences(ctx context.Context, userID uuid.UUID) ([]database.NotificationPreference, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceNotificationPreference.WithOwner(userID.String())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.db.GetUserNotificationPreferences(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetUserWorkspaceBuildParameters(ctx context.Context, params database.GetUserWorkspaceBuildParametersParams) ([]database.GetUserWorkspaceBuildParametersRow, error) {
|
||||
u, err := q.db.GetUserByID(ctx, params.OwnerID)
|
||||
if err != nil {
|
||||
@@ -2498,6 +2640,34 @@ func (q *querier) InsertAuditLog(ctx context.Context, arg database.InsertAuditLo
|
||||
return insert(q.log, q.auth, rbac.ResourceAuditLog, q.db.InsertAuditLog)(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertCustomRole(ctx context.Context, arg database.InsertCustomRoleParams) (database.CustomRole, error) {
|
||||
// Org and site role upsert share the same query. So switch the assertion based on the org uuid.
|
||||
if arg.OrganizationID.UUID != uuid.Nil {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
} else {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignRole); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := q.customRoleCheck(ctx, database.CustomRole{
|
||||
Name: arg.Name,
|
||||
DisplayName: arg.DisplayName,
|
||||
SitePermissions: arg.SitePermissions,
|
||||
OrgPermissions: arg.OrgPermissions,
|
||||
UserPermissions: arg.UserPermissions,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
OrganizationID: arg.OrganizationID,
|
||||
ID: uuid.New(),
|
||||
}); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
return q.db.InsertCustomRole(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertDBCryptKey(ctx context.Context, arg database.InsertDBCryptKeyParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
@@ -2626,6 +2796,14 @@ func (q *querier) InsertProvisionerJobLogs(ctx context.Context, arg database.Ins
|
||||
return q.db.InsertProvisionerJobLogs(ctx, arg)
|
||||
}
|
||||
|
||||
// TODO: We need to create a ProvisionerJob resource type
|
||||
func (q *querier) InsertProvisionerJobTimings(ctx context.Context, arg database.InsertProvisionerJobTimingsParams) ([]database.ProvisionerJobTiming, error) {
|
||||
// if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
return q.db.InsertProvisionerJobTimings(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) InsertProvisionerKey(ctx context.Context, arg database.InsertProvisionerKeyParams) (database.ProvisionerKey, error) {
|
||||
return insert(q.log, q.auth, rbac.ResourceProvisionerKeys.InOrg(arg.OrganizationID).WithID(arg.ID), q.db.InsertProvisionerKey)(ctx, arg)
|
||||
}
|
||||
@@ -2949,6 +3127,33 @@ func (q *querier) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKe
|
||||
return update(q.log, q.auth, fetch, q.db.UpdateAPIKeyByID)(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateCustomRole(ctx context.Context, arg database.UpdateCustomRoleParams) (database.CustomRole, error) {
|
||||
if arg.OrganizationID.UUID != uuid.Nil {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
} else {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceAssignRole); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := q.customRoleCheck(ctx, database.CustomRole{
|
||||
Name: arg.Name,
|
||||
DisplayName: arg.DisplayName,
|
||||
SitePermissions: arg.SitePermissions,
|
||||
OrgPermissions: arg.OrgPermissions,
|
||||
UserPermissions: arg.UserPermissions,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
OrganizationID: arg.OrganizationID,
|
||||
ID: uuid.New(),
|
||||
}); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
return q.db.UpdateCustomRole(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateExternalAuthLink(ctx context.Context, arg database.UpdateExternalAuthLinkParams) (database.ExternalAuthLink, error) {
|
||||
fetch := func(ctx context.Context, arg database.UpdateExternalAuthLinkParams) (database.ExternalAuthLink, error) {
|
||||
return q.db.GetExternalAuthLink(ctx, database.GetExternalAuthLinkParams{UserID: arg.UserID, ProviderID: arg.ProviderID})
|
||||
@@ -3011,6 +3216,13 @@ func (q *querier) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemb
|
||||
return q.db.UpdateMemberRoles(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateNotificationTemplateMethodByID(ctx context.Context, arg database.UpdateNotificationTemplateMethodByIDParams) (database.NotificationTemplate, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceNotificationTemplate); err != nil {
|
||||
return database.NotificationTemplate{}, err
|
||||
}
|
||||
return q.db.UpdateNotificationTemplateMethodByID(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateOAuth2ProviderAppByID(ctx context.Context, arg database.UpdateOAuth2ProviderAppByIDParams) (database.OAuth2ProviderApp, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceOauth2App); err != nil {
|
||||
return database.OAuth2ProviderApp{}, err
|
||||
@@ -3129,6 +3341,13 @@ func (q *querier) UpdateReplica(ctx context.Context, arg database.UpdateReplicaP
|
||||
return q.db.UpdateReplica(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateTailnetPeerStatusByCoordinator(ctx context.Context, arg database.UpdateTailnetPeerStatusByCoordinatorParams) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.UpdateTailnetPeerStatusByCoordinator(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateTemplateACLByID(ctx context.Context, arg database.UpdateTemplateACLByIDParams) error {
|
||||
fetch := func(ctx context.Context, arg database.UpdateTemplateACLByIDParams) (database.Template, error) {
|
||||
return q.db.GetTemplateByID(ctx, arg.ID)
|
||||
@@ -3326,6 +3545,13 @@ func (q *querier) UpdateUserLoginType(ctx context.Context, arg database.UpdateUs
|
||||
return q.db.UpdateUserLoginType(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateUserNotificationPreferences(ctx context.Context, arg database.UpdateUserNotificationPreferencesParams) (int64, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceNotificationPreference.WithOwner(arg.UserID.String())); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return q.db.UpdateUserNotificationPreferences(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) UpdateUserProfile(ctx context.Context, arg database.UpdateUserProfileParams) (database.User, error) {
|
||||
u, err := q.db.GetUserByID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
@@ -3586,7 +3812,9 @@ func (q *querier) UpsertAnnouncementBanners(ctx context.Context, value string) e
|
||||
}
|
||||
|
||||
func (q *querier) UpsertAppSecurityKey(ctx context.Context, data string) error {
|
||||
// No authz checks as this is done during startup
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.UpsertAppSecurityKey(ctx, data)
|
||||
}
|
||||
|
||||
@@ -3597,89 +3825,11 @@ func (q *querier) UpsertApplicationName(ctx context.Context, value string) error
|
||||
return q.db.UpsertApplicationName(ctx, value)
|
||||
}
|
||||
|
||||
// UpsertCustomRole does a series of authz checks to protect custom roles.
|
||||
// - Check custom roles are valid for their resource types + actions
|
||||
// - Check the actor can create the custom role
|
||||
// - Check the custom role does not grant perms the actor does not have
|
||||
// - Prevent negative perms
|
||||
// - Prevent roles with site and org permissions.
|
||||
func (q *querier) UpsertCustomRole(ctx context.Context, arg database.UpsertCustomRoleParams) (database.CustomRole, error) {
|
||||
act, ok := ActorFromContext(ctx)
|
||||
if !ok {
|
||||
return database.CustomRole{}, NoActorError
|
||||
func (q *querier) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Org and site role upsert share the same query. So switch the assertion based on the org uuid.
|
||||
if arg.OrganizationID.UUID != uuid.Nil {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
} else {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignRole); err != nil {
|
||||
return database.CustomRole{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if arg.OrganizationID.UUID == uuid.Nil && len(arg.OrgPermissions) > 0 {
|
||||
return database.CustomRole{}, xerrors.Errorf("organization permissions require specifying an organization id")
|
||||
}
|
||||
|
||||
// There is quite a bit of validation we should do here.
|
||||
// The rbac.Role has a 'Valid()' function on it that will do a lot
|
||||
// of checks.
|
||||
rbacRole, err := rolestore.ConvertDBRole(database.CustomRole{
|
||||
Name: arg.Name,
|
||||
DisplayName: arg.DisplayName,
|
||||
SitePermissions: arg.SitePermissions,
|
||||
OrgPermissions: arg.OrgPermissions,
|
||||
UserPermissions: arg.UserPermissions,
|
||||
OrganizationID: arg.OrganizationID,
|
||||
})
|
||||
if err != nil {
|
||||
return database.CustomRole{}, xerrors.Errorf("invalid args: %w", err)
|
||||
}
|
||||
|
||||
err = rbacRole.Valid()
|
||||
if err != nil {
|
||||
return database.CustomRole{}, xerrors.Errorf("invalid role: %w", err)
|
||||
}
|
||||
|
||||
if len(rbacRole.Org) > 0 && len(rbacRole.Site) > 0 {
|
||||
// This is a choice to keep roles simple. If we allow mixing site and org scoped perms, then knowing who can
|
||||
// do what gets more complicated.
|
||||
return database.CustomRole{}, xerrors.Errorf("invalid custom role, cannot assign both org and site permissions at the same time")
|
||||
}
|
||||
|
||||
if len(rbacRole.Org) > 1 {
|
||||
// Again to avoid more complexity in our roles
|
||||
return database.CustomRole{}, xerrors.Errorf("invalid custom role, cannot assign permissions to more than 1 org at a time")
|
||||
}
|
||||
|
||||
// Prevent escalation
|
||||
for _, sitePerm := range rbacRole.Site {
|
||||
err := q.customRoleEscalationCheck(ctx, act, sitePerm, rbac.Object{Type: sitePerm.ResourceType})
|
||||
if err != nil {
|
||||
return database.CustomRole{}, xerrors.Errorf("site permission: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for orgID, perms := range rbacRole.Org {
|
||||
for _, orgPerm := range perms {
|
||||
err := q.customRoleEscalationCheck(ctx, act, orgPerm, rbac.Object{OrgID: orgID, Type: orgPerm.ResourceType})
|
||||
if err != nil {
|
||||
return database.CustomRole{}, xerrors.Errorf("org=%q: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, userPerm := range rbacRole.User {
|
||||
err := q.customRoleEscalationCheck(ctx, act, userPerm, rbac.Object{Type: userPerm.ResourceType, Owner: act.ID})
|
||||
if err != nil {
|
||||
return database.CustomRole{}, xerrors.Errorf("user permission: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return q.db.UpsertCustomRole(ctx, arg)
|
||||
return q.db.UpsertCoordinatorResumeTokenSigningKey(ctx, value)
|
||||
}
|
||||
|
||||
func (q *querier) UpsertDefaultProxy(ctx context.Context, arg database.UpsertDefaultProxyParams) error {
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
|
||||
@@ -80,7 +81,7 @@ func TestInTX(t *testing.T) {
|
||||
|
||||
db := dbmem.New()
|
||||
q := dbauthz.New(db, &coderdtest.RecordingAuthorizer{
|
||||
Wrapped: &coderdtest.FakeAuthorizer{AlwaysReturn: xerrors.New("custom error")},
|
||||
Wrapped: (&coderdtest.FakeAuthorizer{}).AlwaysReturn(xerrors.New("custom error")),
|
||||
}, slog.Make(), coderdtest.AccessControlStorePointer())
|
||||
actor := rbac.Subject{
|
||||
ID: uuid.NewString(),
|
||||
@@ -109,7 +110,7 @@ func TestNew(t *testing.T) {
|
||||
db = dbmem.New()
|
||||
exp = dbgen.Workspace(t, db, database.Workspace{})
|
||||
rec = &coderdtest.RecordingAuthorizer{
|
||||
Wrapped: &coderdtest.FakeAuthorizer{AlwaysReturn: nil},
|
||||
Wrapped: &coderdtest.FakeAuthorizer{},
|
||||
}
|
||||
subj = rbac.Subject{}
|
||||
ctx = dbauthz.As(context.Background(), rbac.Subject{})
|
||||
@@ -134,7 +135,7 @@ func TestNew(t *testing.T) {
|
||||
func TestDBAuthzRecursive(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := dbauthz.New(dbmem.New(), &coderdtest.RecordingAuthorizer{
|
||||
Wrapped: &coderdtest.FakeAuthorizer{AlwaysReturn: nil},
|
||||
Wrapped: &coderdtest.FakeAuthorizer{},
|
||||
}, slog.Make(), coderdtest.AccessControlStorePointer())
|
||||
actor := rbac.Subject{
|
||||
ID: uuid.NewString(),
|
||||
@@ -266,14 +267,14 @@ func (s *MethodTestSuite) TestAuditLogs() {
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
check.Args(database.GetAuditLogsOffsetParams{
|
||||
LimitOpt: 10,
|
||||
}).Asserts()
|
||||
}).Asserts(rbac.ResourceAuditLog, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetAuthorizedAuditLogsOffset", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
check.Args(database.GetAuditLogsOffsetParams{
|
||||
LimitOpt: 10,
|
||||
}, emptyPreparedAuthorized{}).Asserts()
|
||||
}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead)
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -304,8 +305,10 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
}))
|
||||
s.Run("DeleteGroupMemberFromGroup", s.Subtest(func(db database.Store, check *expects) {
|
||||
g := dbgen.Group(s.T(), db, database.Group{})
|
||||
m := dbgen.GroupMember(s.T(), db, database.GroupMember{
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
m := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{
|
||||
GroupID: g.ID,
|
||||
UserID: u.ID,
|
||||
})
|
||||
check.Args(database.DeleteGroupMemberFromGroupParams{
|
||||
UserID: m.UserID,
|
||||
@@ -325,24 +328,35 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
}))
|
||||
s.Run("GetGroupMembersByGroupID", s.Subtest(func(db database.Store, check *expects) {
|
||||
g := dbgen.Group(s.T(), db, database.Group{})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{})
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
gm := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID})
|
||||
check.Args(g.ID).Asserts(gm, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetGroupMembersCountByGroupID", s.Subtest(func(db database.Store, check *expects) {
|
||||
g := dbgen.Group(s.T(), db, database.Group{})
|
||||
check.Args(g.ID).Asserts(g, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetGroupMembers", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{})
|
||||
g := dbgen.Group(s.T(), db, database.Group{})
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID})
|
||||
check.Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("System/GetGroups", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.Group(s.T(), db, database.Group{})
|
||||
check.Args(database.GetGroupsParams{}).
|
||||
Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetGroups", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.Group(s.T(), db, database.Group{})
|
||||
check.Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetGroupsByOrganizationAndUserID", s.Subtest(func(db database.Store, check *expects) {
|
||||
g := dbgen.Group(s.T(), db, database.Group{})
|
||||
gm := dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g.ID})
|
||||
check.Args(database.GetGroupsByOrganizationAndUserIDParams{
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
gm := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID})
|
||||
check.Args(database.GetGroupsParams{
|
||||
OrganizationID: g.OrganizationID,
|
||||
UserID: gm.UserID,
|
||||
}).Asserts(g, policy.ActionRead)
|
||||
HasMemberID: gm.UserID,
|
||||
}).Asserts(rbac.ResourceSystem, policy.ActionRead, g, policy.ActionRead).
|
||||
// Fail the system resource skip
|
||||
FailSystemObjectChecks()
|
||||
}))
|
||||
s.Run("InsertAllUsersGroup", s.Subtest(func(db database.Store, check *expects) {
|
||||
o := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
@@ -367,7 +381,7 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
u1 := dbgen.User(s.T(), db, database.User{})
|
||||
g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g1.ID, UserID: u1.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g1.ID, UserID: u1.ID})
|
||||
check.Args(database.InsertUserGroupsByNameParams{
|
||||
OrganizationID: o.ID,
|
||||
UserID: u1.ID,
|
||||
@@ -379,8 +393,8 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
u1 := dbgen.User(s.T(), db, database.User{})
|
||||
g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g1.ID, UserID: u1.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g2.ID, UserID: u1.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g1.ID, UserID: u1.ID})
|
||||
_ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g2.ID, UserID: u1.ID})
|
||||
check.Args(u1.ID).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns()
|
||||
}))
|
||||
s.Run("UpdateGroupByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
@@ -586,12 +600,19 @@ func (s *MethodTestSuite) TestLicense() {
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestOrganization() {
|
||||
s.Run("GetGroupsByOrganizationID", s.Subtest(func(db database.Store, check *expects) {
|
||||
s.Run("ByOrganization/GetGroups", s.Subtest(func(db database.Store, check *expects) {
|
||||
o := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
a := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
b := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID})
|
||||
check.Args(o.ID).Asserts(a, policy.ActionRead, b, policy.ActionRead).
|
||||
Returns([]database.Group{a, b})
|
||||
check.Args(database.GetGroupsParams{
|
||||
OrganizationID: o.ID,
|
||||
}).Asserts(rbac.ResourceSystem, policy.ActionRead, a, policy.ActionRead, b, policy.ActionRead).
|
||||
Returns([]database.GetGroupsRow{
|
||||
{Group: a, OrganizationName: o.Name, OrganizationDisplayName: o.DisplayName},
|
||||
{Group: b, OrganizationName: o.Name, OrganizationDisplayName: o.DisplayName},
|
||||
}).
|
||||
// Fail the system check shortcut
|
||||
FailSystemObjectChecks()
|
||||
}))
|
||||
s.Run("GetOrganizationByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
o := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
@@ -617,7 +638,7 @@ func (s *MethodTestSuite) TestOrganization() {
|
||||
def, _ := db.GetDefaultOrganization(context.Background())
|
||||
a := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
b := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
check.Args().Asserts(def, policy.ActionRead, a, policy.ActionRead, b, policy.ActionRead).Returns(slice.New(def, a, b))
|
||||
check.Args(database.GetOrganizationsParams{}).Asserts(def, policy.ActionRead, a, policy.ActionRead, b, policy.ActionRead).Returns(slice.New(def, a, b))
|
||||
}))
|
||||
s.Run("GetOrganizationsByUserID", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
@@ -1057,11 +1078,17 @@ func (s *MethodTestSuite) TestUser() {
|
||||
}))
|
||||
s.Run("GetQuotaAllowanceForUser", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(u.ID).Asserts(u, policy.ActionRead).Returns(int64(0))
|
||||
check.Args(database.GetQuotaAllowanceForUserParams{
|
||||
UserID: u.ID,
|
||||
OrganizationID: uuid.New(),
|
||||
}).Asserts(u, policy.ActionRead).Returns(int64(0))
|
||||
}))
|
||||
s.Run("GetQuotaConsumedForUser", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(u.ID).Asserts(u, policy.ActionRead).Returns(int64(0))
|
||||
check.Args(database.GetQuotaConsumedForUserParams{
|
||||
OwnerID: u.ID,
|
||||
OrganizationID: uuid.New(),
|
||||
}).Asserts(u, policy.ActionRead).Returns(int64(0))
|
||||
}))
|
||||
s.Run("GetUserByEmailOrUsername", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
@@ -1246,9 +1273,102 @@ func (s *MethodTestSuite) TestUser() {
|
||||
s.Run("CustomRoles", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.CustomRolesParams{}).Asserts(rbac.ResourceAssignRole, policy.ActionRead).Returns([]database.CustomRole{})
|
||||
}))
|
||||
s.Run("Blank/UpsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
s.Run("Organization/DeleteCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
|
||||
OrganizationID: uuid.NullUUID{
|
||||
UUID: uuid.New(),
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
check.Args(database.DeleteCustomRoleParams{
|
||||
Name: customRole.Name,
|
||||
OrganizationID: customRole.OrganizationID,
|
||||
}).Asserts(
|
||||
rbac.ResourceAssignOrgRole.InOrg(customRole.OrganizationID.UUID), policy.ActionDelete)
|
||||
}))
|
||||
s.Run("Site/DeleteCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
|
||||
OrganizationID: uuid.NullUUID{
|
||||
UUID: uuid.Nil,
|
||||
Valid: false,
|
||||
},
|
||||
})
|
||||
check.Args(database.DeleteCustomRoleParams{
|
||||
Name: customRole.Name,
|
||||
}).Asserts(
|
||||
rbac.ResourceAssignRole, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("Blank/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{})
|
||||
// Blank is no perms in the role
|
||||
check.Args(database.UpsertCustomRoleParams{
|
||||
check.Args(database.UpdateCustomRoleParams{
|
||||
Name: customRole.Name,
|
||||
DisplayName: "Test Name",
|
||||
SitePermissions: nil,
|
||||
OrgPermissions: nil,
|
||||
UserPermissions: nil,
|
||||
}).Asserts(rbac.ResourceAssignRole, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("SitePermissions/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
|
||||
OrganizationID: uuid.NullUUID{
|
||||
UUID: uuid.Nil,
|
||||
Valid: false,
|
||||
},
|
||||
})
|
||||
check.Args(database.UpdateCustomRoleParams{
|
||||
Name: customRole.Name,
|
||||
OrganizationID: customRole.OrganizationID,
|
||||
DisplayName: "Test Name",
|
||||
SitePermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead, codersdk.ActionUpdate, codersdk.ActionDelete, codersdk.ActionViewInsights},
|
||||
}), convertSDKPerm),
|
||||
OrgPermissions: nil,
|
||||
UserPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceWorkspace: {codersdk.ActionRead},
|
||||
}), convertSDKPerm),
|
||||
}).Asserts(
|
||||
// First check
|
||||
rbac.ResourceAssignRole, policy.ActionUpdate,
|
||||
// Escalation checks
|
||||
rbac.ResourceTemplate, policy.ActionCreate,
|
||||
rbac.ResourceTemplate, policy.ActionRead,
|
||||
rbac.ResourceTemplate, policy.ActionUpdate,
|
||||
rbac.ResourceTemplate, policy.ActionDelete,
|
||||
rbac.ResourceTemplate, policy.ActionViewInsights,
|
||||
|
||||
rbac.ResourceWorkspace.WithOwner(testActorID.String()), policy.ActionRead,
|
||||
)
|
||||
}))
|
||||
s.Run("OrgPermissions/UpdateCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
orgID := uuid.New()
|
||||
customRole := dbgen.CustomRole(s.T(), db, database.CustomRole{
|
||||
OrganizationID: uuid.NullUUID{
|
||||
UUID: orgID,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
|
||||
check.Args(database.UpdateCustomRoleParams{
|
||||
Name: customRole.Name,
|
||||
DisplayName: "Test Name",
|
||||
OrganizationID: customRole.OrganizationID,
|
||||
SitePermissions: nil,
|
||||
OrgPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead},
|
||||
}), convertSDKPerm),
|
||||
UserPermissions: nil,
|
||||
}).Asserts(
|
||||
// First check
|
||||
rbac.ResourceAssignOrgRole.InOrg(orgID), policy.ActionUpdate,
|
||||
// Escalation checks
|
||||
rbac.ResourceTemplate.InOrg(orgID), policy.ActionCreate,
|
||||
rbac.ResourceTemplate.InOrg(orgID), policy.ActionRead,
|
||||
)
|
||||
}))
|
||||
s.Run("Blank/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
// Blank is no perms in the role
|
||||
check.Args(database.InsertCustomRoleParams{
|
||||
Name: "test",
|
||||
DisplayName: "Test Name",
|
||||
SitePermissions: nil,
|
||||
@@ -1256,8 +1376,8 @@ func (s *MethodTestSuite) TestUser() {
|
||||
UserPermissions: nil,
|
||||
}).Asserts(rbac.ResourceAssignRole, policy.ActionCreate)
|
||||
}))
|
||||
s.Run("SitePermissions/UpsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.UpsertCustomRoleParams{
|
||||
s.Run("SitePermissions/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.InsertCustomRoleParams{
|
||||
Name: "test",
|
||||
DisplayName: "Test Name",
|
||||
SitePermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
@@ -1280,9 +1400,9 @@ func (s *MethodTestSuite) TestUser() {
|
||||
rbac.ResourceWorkspace.WithOwner(testActorID.String()), policy.ActionRead,
|
||||
)
|
||||
}))
|
||||
s.Run("OrgPermissions/UpsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
s.Run("OrgPermissions/InsertCustomRole", s.Subtest(func(db database.Store, check *expects) {
|
||||
orgID := uuid.New()
|
||||
check.Args(database.UpsertCustomRoleParams{
|
||||
check.Args(database.InsertCustomRoleParams{
|
||||
Name: "test",
|
||||
DisplayName: "Test Name",
|
||||
OrganizationID: uuid.NullUUID{
|
||||
@@ -1293,17 +1413,13 @@ func (s *MethodTestSuite) TestUser() {
|
||||
OrgPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead},
|
||||
}), convertSDKPerm),
|
||||
UserPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{
|
||||
codersdk.ResourceWorkspace: {codersdk.ActionRead},
|
||||
}), convertSDKPerm),
|
||||
UserPermissions: nil,
|
||||
}).Asserts(
|
||||
// First check
|
||||
rbac.ResourceAssignOrgRole.InOrg(orgID), policy.ActionCreate,
|
||||
// Escalation checks
|
||||
rbac.ResourceTemplate.InOrg(orgID), policy.ActionCreate,
|
||||
rbac.ResourceTemplate.InOrg(orgID), policy.ActionRead,
|
||||
|
||||
rbac.ResourceWorkspace.WithOwner(testActorID.String()), policy.ActionRead,
|
||||
)
|
||||
}))
|
||||
}
|
||||
@@ -2052,6 +2168,11 @@ func (s *MethodTestSuite) TestTailnetFunctions() {
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionCreate).
|
||||
Errors(dbmem.ErrUnimplemented)
|
||||
}))
|
||||
s.Run("UpdateTailnetPeerStatusByCoordinator", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args(database.UpdateTailnetPeerStatusByCoordinatorParams{}).
|
||||
Asserts(rbac.ResourceTailnetCoordinator, policy.ActionUpdate).
|
||||
Errors(dbmem.ErrUnimplemented)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestDBCrypt() {
|
||||
@@ -2358,6 +2479,13 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
JobID: j.ID,
|
||||
}).Asserts( /*rbac.ResourceSystem, policy.ActionCreate*/ )
|
||||
}))
|
||||
s.Run("InsertProvisionerJobTimings", s.Subtest(func(db database.Store, check *expects) {
|
||||
// TODO: we need to create a ProvisionerJob resource
|
||||
j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
|
||||
check.Args(database.InsertProvisionerJobTimingsParams{
|
||||
JobID: j.ID,
|
||||
}).Asserts( /*rbac.ResourceSystem, policy.ActionCreate*/ )
|
||||
}))
|
||||
s.Run("UpsertProvisionerDaemon", s.Subtest(func(db database.Store, check *expects) {
|
||||
org := dbgen.Organization(s.T(), db, database.Organization{})
|
||||
pd := rbac.ResourceProvisionerDaemon.InOrg(org.ID)
|
||||
@@ -2389,7 +2517,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
}).Asserts(rbac.ResourceSystem, policy.ActionCreate)
|
||||
}))
|
||||
s.Run("DeleteOldWorkspaceAgentLogs", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionDelete)
|
||||
check.Args(time.Time{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("InsertWorkspaceAgentStats", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.InsertWorkspaceAgentStatsParams{}).Asserts(rbac.ResourceSystem, policy.ActionCreate).Errors(errMatchAny)
|
||||
@@ -2419,10 +2547,10 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
check.Args(int32(0)).Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetAppSecurityKey", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args().Asserts()
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("UpsertAppSecurityKey", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args("").Asserts()
|
||||
check.Args("foo").Asserts(rbac.ResourceSystem, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("GetApplicationName", s.Subtest(func(db database.Store, check *expects) {
|
||||
db.UpsertApplicationName(context.Background(), "foo")
|
||||
@@ -2462,6 +2590,13 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
db.UpsertOAuthSigningKey(context.Background(), "foo")
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("UpsertCoordinatorResumeTokenSigningKey", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args("foo").Asserts(rbac.ResourceSystem, policy.ActionUpdate)
|
||||
}))
|
||||
s.Run("GetCoordinatorResumeTokenSigningKey", s.Subtest(func(db database.Store, check *expects) {
|
||||
db.UpsertCoordinatorResumeTokenSigningKey(context.Background(), "foo")
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("InsertMissingGroups", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.InsertMissingGroupsParams{}).Asserts(rbac.ResourceSystem, policy.ActionCreate).Errors(errMatchAny)
|
||||
}))
|
||||
@@ -2561,6 +2696,10 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
AgentID: uuid.New(),
|
||||
}).Asserts(tpl, policy.ActionCreate)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestNotifications() {
|
||||
// System functions
|
||||
s.Run("AcquireNotificationMessages", s.Subtest(func(db database.Store, check *expects) {
|
||||
// TODO: update this test once we have a specific role for notifications
|
||||
check.Args(database.AcquireNotificationMessagesParams{}).Asserts(rbac.ResourceSystem, policy.ActionUpdate)
|
||||
@@ -2596,6 +2735,42 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
Limit: 10,
|
||||
}).Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
|
||||
// Notification templates
|
||||
s.Run("GetNotificationTemplateByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
user := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(user.ID).Asserts(rbac.ResourceNotificationTemplate, policy.ActionRead).
|
||||
Errors(dbmem.ErrUnimplemented)
|
||||
}))
|
||||
s.Run("GetNotificationTemplatesByKind", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.NotificationTemplateKindSystem).
|
||||
Asserts().
|
||||
Errors(dbmem.ErrUnimplemented)
|
||||
|
||||
// TODO(dannyk): add support for other database.NotificationTemplateKind types once implemented.
|
||||
}))
|
||||
s.Run("UpdateNotificationTemplateMethodByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(database.UpdateNotificationTemplateMethodByIDParams{
|
||||
Method: database.NullNotificationMethod{NotificationMethod: database.NotificationMethodWebhook, Valid: true},
|
||||
ID: notifications.TemplateWorkspaceDormant,
|
||||
}).Asserts(rbac.ResourceNotificationTemplate, policy.ActionUpdate).
|
||||
Errors(dbmem.ErrUnimplemented)
|
||||
}))
|
||||
|
||||
// Notification preferences
|
||||
s.Run("GetUserNotificationPreferences", s.Subtest(func(db database.Store, check *expects) {
|
||||
user := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(user.ID).
|
||||
Asserts(rbac.ResourceNotificationPreference.WithOwner(user.ID.String()), policy.ActionRead)
|
||||
}))
|
||||
s.Run("UpdateUserNotificationPreferences", s.Subtest(func(db database.Store, check *expects) {
|
||||
user := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(database.UpdateUserNotificationPreferencesParams{
|
||||
UserID: user.ID,
|
||||
NotificationTemplateIds: []uuid.UUID{notifications.TemplateWorkspaceAutoUpdated, notifications.TemplateWorkspaceDeleted},
|
||||
Disableds: []bool{true, false},
|
||||
}).Asserts(rbac.ResourceNotificationPreference.WithOwner(user.ID.String()), policy.ActionUpdate)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestOAuth2ProviderApps() {
|
||||
|
||||
@@ -0,0 +1,164 @@
|
||||
package dbauthz_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmem"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
)
|
||||
|
||||
// nolint:tparallel
|
||||
func TestGroupsAuth(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test would take too long to run on postgres")
|
||||
}
|
||||
|
||||
authz := rbac.NewAuthorizer(prometheus.NewRegistry())
|
||||
|
||||
db := dbauthz.New(dbmem.New(), authz, slogtest.Make(t, &slogtest.Options{
|
||||
IgnoreErrors: true,
|
||||
}), coderdtest.AccessControlStorePointer())
|
||||
|
||||
ownerCtx := dbauthz.As(context.Background(), rbac.Subject{
|
||||
ID: "owner",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleOwner()}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
})
|
||||
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
group := dbgen.Group(t, db, database.Group{
|
||||
OrganizationID: org.ID,
|
||||
})
|
||||
|
||||
var users []database.User
|
||||
for i := 0; i < 5; i++ {
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
users = append(users, user)
|
||||
err := db.InsertGroupMember(ownerCtx, database.InsertGroupMemberParams{
|
||||
UserID: user.ID,
|
||||
GroupID: group.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
totalMembers := len(users)
|
||||
testCases := []struct {
|
||||
Name string
|
||||
Subject rbac.Subject
|
||||
ReadGroup bool
|
||||
ReadMembers bool
|
||||
MembersExpected int
|
||||
}{
|
||||
{
|
||||
Name: "Owner",
|
||||
Subject: rbac.Subject{
|
||||
ID: "owner",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleOwner()}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: true,
|
||||
ReadMembers: true,
|
||||
MembersExpected: totalMembers,
|
||||
},
|
||||
{
|
||||
Name: "UserAdmin",
|
||||
Subject: rbac.Subject{
|
||||
ID: "useradmin",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleUserAdmin()}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: true,
|
||||
ReadMembers: true,
|
||||
MembersExpected: totalMembers,
|
||||
},
|
||||
{
|
||||
Name: "OrgAdmin",
|
||||
Subject: rbac.Subject{
|
||||
ID: "orgadmin",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.ScopedRoleOrgAdmin(org.ID)}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: true,
|
||||
ReadMembers: true,
|
||||
MembersExpected: totalMembers,
|
||||
},
|
||||
{
|
||||
Name: "OrgUserAdmin",
|
||||
Subject: rbac.Subject{
|
||||
ID: "orgUserAdmin",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.ScopedRoleOrgUserAdmin(org.ID)}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: true,
|
||||
ReadMembers: true,
|
||||
MembersExpected: totalMembers,
|
||||
},
|
||||
{
|
||||
Name: "GroupMember",
|
||||
Subject: rbac.Subject{
|
||||
ID: users[0].ID.String(),
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(org.ID)}.Expand())),
|
||||
Groups: []string{
|
||||
group.ID.String(),
|
||||
},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: true,
|
||||
ReadMembers: true,
|
||||
MembersExpected: 1,
|
||||
},
|
||||
{
|
||||
// Org admin in the incorrect organization
|
||||
Name: "DifferentOrgAdmin",
|
||||
Subject: rbac.Subject{
|
||||
ID: "orgadmin",
|
||||
Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.ScopedRoleOrgUserAdmin(uuid.New())}.Expand())),
|
||||
Groups: []string{},
|
||||
Scope: rbac.ExpandableScope(rbac.ScopeAll),
|
||||
},
|
||||
ReadGroup: false,
|
||||
ReadMembers: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actorCtx := dbauthz.As(context.Background(), tc.Subject)
|
||||
_, err := db.GetGroupByID(actorCtx, group.ID)
|
||||
if tc.ReadGroup {
|
||||
require.NoError(t, err, "group read")
|
||||
} else {
|
||||
require.Error(t, err, "group read")
|
||||
}
|
||||
|
||||
members, err := db.GetGroupMembersByGroupID(actorCtx, group.ID)
|
||||
if tc.ReadMembers {
|
||||
require.NoError(t, err, "member read")
|
||||
require.Len(t, members, tc.MembersExpected, "member count found does not match")
|
||||
} else {
|
||||
require.Len(t, members, 0, "member count is not 0")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -114,9 +114,7 @@ func (s *MethodTestSuite) Subtest(testCaseF func(db database.Store, check *expec
|
||||
s.methodAccounting[methodName]++
|
||||
|
||||
db := dbmem.New()
|
||||
fakeAuthorizer := &coderdtest.FakeAuthorizer{
|
||||
AlwaysReturn: nil,
|
||||
}
|
||||
fakeAuthorizer := &coderdtest.FakeAuthorizer{}
|
||||
rec := &coderdtest.RecordingAuthorizer{
|
||||
Wrapped: fakeAuthorizer,
|
||||
}
|
||||
@@ -174,7 +172,11 @@ func (s *MethodTestSuite) Subtest(testCaseF func(db database.Store, check *expec
|
||||
// Always run
|
||||
s.Run("Success", func() {
|
||||
rec.Reset()
|
||||
fakeAuthorizer.AlwaysReturn = nil
|
||||
if testCase.successAuthorizer != nil {
|
||||
fakeAuthorizer.ConditionalReturn = testCase.successAuthorizer
|
||||
} else {
|
||||
fakeAuthorizer.AlwaysReturn(nil)
|
||||
}
|
||||
|
||||
outputs, err := callMethod(ctx)
|
||||
if testCase.err == nil {
|
||||
@@ -232,7 +234,7 @@ func (s *MethodTestSuite) NoActorErrorTest(callMethod func(ctx context.Context)
|
||||
// Asserts that the error returned is a NotAuthorizedError.
|
||||
func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderdtest.FakeAuthorizer, testCase expects, callMethod func(ctx context.Context) ([]reflect.Value, error)) {
|
||||
s.Run("NotAuthorized", func() {
|
||||
az.AlwaysReturn = rbac.ForbiddenWithInternal(xerrors.New("Always fail authz"), rbac.Subject{}, "", rbac.Object{}, nil)
|
||||
az.AlwaysReturn(rbac.ForbiddenWithInternal(xerrors.New("Always fail authz"), rbac.Subject{}, "", rbac.Object{}, nil))
|
||||
|
||||
// If we have assertions, that means the method should FAIL
|
||||
// if RBAC will disallow the request. The returned error should
|
||||
@@ -257,8 +259,8 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd
|
||||
// Pass in a canceled context
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
cancel()
|
||||
az.AlwaysReturn = rbac.ForbiddenWithInternal(&topdown.Error{Code: topdown.CancelErr},
|
||||
rbac.Subject{}, "", rbac.Object{}, nil)
|
||||
az.AlwaysReturn(rbac.ForbiddenWithInternal(&topdown.Error{Code: topdown.CancelErr},
|
||||
rbac.Subject{}, "", rbac.Object{}, nil))
|
||||
|
||||
// If we have assertions, that means the method should FAIL
|
||||
// if RBAC will disallow the request. The returned error should
|
||||
@@ -324,6 +326,7 @@ type expects struct {
|
||||
// instead.
|
||||
notAuthorizedExpect string
|
||||
cancelledCtxExpect string
|
||||
successAuthorizer func(ctx context.Context, subject rbac.Subject, action policy.Action, obj rbac.Object) error
|
||||
}
|
||||
|
||||
// Asserts is required. Asserts the RBAC authorize calls that should be made.
|
||||
@@ -354,6 +357,23 @@ func (m *expects) Errors(err error) *expects {
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *expects) FailSystemObjectChecks() *expects {
|
||||
return m.WithSuccessAuthorizer(func(ctx context.Context, subject rbac.Subject, action policy.Action, obj rbac.Object) error {
|
||||
if obj.Type == rbac.ResourceSystem.Type {
|
||||
return xerrors.Errorf("hard coded system authz failed")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// WithSuccessAuthorizer is helpful when an optimization authz check is made
|
||||
// to skip some RBAC checks. This check in testing would prevent the ability
|
||||
// to assert the more nuanced RBAC checks.
|
||||
func (m *expects) WithSuccessAuthorizer(f func(ctx context.Context, subject rbac.Subject, action policy.Action, obj rbac.Object) error) *expects {
|
||||
m.successAuthorizer = f
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *expects) WithNotAuthorized(contains string) *expects {
|
||||
m.notAuthorizedExpect = contains
|
||||
return m
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
@@ -374,18 +375,61 @@ func Group(t testing.TB, db database.Store, orig database.Group) database.Group
|
||||
return group
|
||||
}
|
||||
|
||||
func GroupMember(t testing.TB, db database.Store, orig database.GroupMember) database.GroupMember {
|
||||
member := database.GroupMember{
|
||||
UserID: takeFirst(orig.UserID, uuid.New()),
|
||||
GroupID: takeFirst(orig.GroupID, uuid.New()),
|
||||
}
|
||||
// GroupMember requires a user + group to already exist.
|
||||
// Example for creating a group member for a random group + user.
|
||||
//
|
||||
// GroupMember(t, db, database.GroupMemberTable{
|
||||
// UserID: User(t, db, database.User{}).ID,
|
||||
// GroupID: Group(t, db, database.Group{
|
||||
// OrganizationID: must(db.GetDefaultOrganization(genCtx)).ID,
|
||||
// }).ID,
|
||||
// })
|
||||
func GroupMember(t testing.TB, db database.Store, member database.GroupMemberTable) database.GroupMember {
|
||||
require.NotEqual(t, member.UserID, uuid.Nil, "A user id is required to use 'dbgen.GroupMember', use 'dbgen.User'.")
|
||||
require.NotEqual(t, member.GroupID, uuid.Nil, "A group id is required to use 'dbgen.GroupMember', use 'dbgen.Group'.")
|
||||
|
||||
//nolint:gosimple
|
||||
err := db.InsertGroupMember(genCtx, database.InsertGroupMemberParams{
|
||||
UserID: member.UserID,
|
||||
GroupID: member.GroupID,
|
||||
})
|
||||
require.NoError(t, err, "insert group member")
|
||||
return member
|
||||
|
||||
user, err := db.GetUserByID(genCtx, member.UserID)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
require.NoErrorf(t, err, "'dbgen.GroupMember' failed as the user with id %s does not exist. A user is required to use this function, use 'dbgen.User'.", member.UserID)
|
||||
}
|
||||
require.NoError(t, err, "get user by id")
|
||||
|
||||
group, err := db.GetGroupByID(genCtx, member.GroupID)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
require.NoErrorf(t, err, "'dbgen.GroupMember' failed as the group with id %s does not exist. A group is required to use this function, use 'dbgen.Group'.", member.GroupID)
|
||||
}
|
||||
require.NoError(t, err, "get group by id")
|
||||
|
||||
groupMember := database.GroupMember{
|
||||
UserID: user.ID,
|
||||
UserEmail: user.Email,
|
||||
UserUsername: user.Username,
|
||||
UserHashedPassword: user.HashedPassword,
|
||||
UserCreatedAt: user.CreatedAt,
|
||||
UserUpdatedAt: user.UpdatedAt,
|
||||
UserStatus: user.Status,
|
||||
UserRbacRoles: user.RBACRoles,
|
||||
UserLoginType: user.LoginType,
|
||||
UserAvatarUrl: user.AvatarURL,
|
||||
UserDeleted: user.Deleted,
|
||||
UserLastSeenAt: user.LastSeenAt,
|
||||
UserQuietHoursSchedule: user.QuietHoursSchedule,
|
||||
UserThemePreference: user.ThemePreference,
|
||||
UserName: user.Name,
|
||||
UserGithubComUserID: user.GithubComUserID,
|
||||
OrganizationID: group.OrganizationID,
|
||||
GroupName: group.Name,
|
||||
GroupID: group.ID,
|
||||
}
|
||||
|
||||
return groupMember
|
||||
}
|
||||
|
||||
// ProvisionerJob is a bit more involved to get the values such as "completedAt", "startedAt", "cancelledAt" set. ps
|
||||
@@ -836,7 +880,7 @@ func OAuth2ProviderAppToken(t testing.TB, db database.Store, seed database.OAuth
|
||||
}
|
||||
|
||||
func CustomRole(t testing.TB, db database.Store, seed database.CustomRole) database.CustomRole {
|
||||
role, err := db.UpsertCustomRole(genCtx, database.UpsertCustomRoleParams{
|
||||
role, err := db.InsertCustomRole(genCtx, database.InsertCustomRoleParams{
|
||||
Name: takeFirst(seed.Name, strings.ToLower(testutil.GetRandomName(t))),
|
||||
DisplayName: testutil.GetRandomName(t),
|
||||
OrganizationID: seed.OrganizationID,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user